From dd2c06e308e5555c6a888d167e2b051cace1d364 Mon Sep 17 00:00:00 2001
From: Cyril Brulebois
Date: Tue, 14 Feb 2023 22:32:27 +0000
Subject: [PATCH 1/1] Import crowdsec_1.4.2.orig.tar.gz

[dgit import orig crowdsec_1.4.2.orig.tar.gz]
---
 .dockerignore | 3 +
 .github/ISSUE_TEMPLATE/bug_report.yaml | 136 +
 .github/ISSUE_TEMPLATE/config.yml | 4 +
 .github/ISSUE_TEMPLATE/feature_request.yaml | 19 +
 .github/release-drafter.yml | 31 +
 .github/workflows/.yamllint | 1 +
 .github/workflows/bats-hub.yml | 77 +
 .github/workflows/bats-mysql.yml | 96 +
 .github/workflows/bats-postgres.yml | 99 +
 .github/workflows/bats-sqlite-coverage.yml | 80 +
 .github/workflows/bats.yml | 50 +
 .github/workflows/ci-windows-build-msi.yml | 37 +
 .github/workflows/ci_golangci-lint.yml | 47 +
 .github/workflows/ci_release-drafter.yml | 20 +
 .github/workflows/codeql-analysis.yml | 72 +
 .github/workflows/dispatch_ci_hub.yaml | 21 +
 .../workflows/dispatch_create_branch_hub.yaml | 24 +
 .../workflows/dispatch_delete_branch_hub.yaml | 24 +
 .github/workflows/go-tests-windows.yml | 51 +
 .github/workflows/go-tests.yml | 143 +
 .github/workflows/release_publish-package.yml | 48 +
 .../release_publish_docker-image-debian.yml | 62 +
 .../release_publish_docker-image.yml | 89 +
 .github/workflows/update_docker_hub_doc.yml | 26 +
 .gitignore | 46 +
 .gitmodules | 16 +
 .golangci.yml | 221 +
 .yamllint | 43 +
 CONTRIBUTING.md | 3 +
 Dockerfile | 51 +
 Dockerfile.debian | 64 +
 LICENSE | 21 +
 Makefile | 213 +
 README.md | 162 +
 SECURITY.md | 31 +
 azure-pipelines.yml | 126 +
 cmd/crowdsec-cli/Makefile | 42 +
 cmd/crowdsec-cli/alerts.go | 445 ++
 cmd/crowdsec-cli/alerts_table.go | 100 +
 cmd/crowdsec-cli/bouncers.go | 168 +
 cmd/crowdsec-cli/bouncers_table.go | 31 +
 cmd/crowdsec-cli/capi.go | 172 +
 cmd/crowdsec-cli/collections.go | 183 +
 cmd/crowdsec-cli/completion.go | 86 +
 cmd/crowdsec-cli/config.go | 506 ++
 cmd/crowdsec-cli/console.go | 278 +
 cmd/crowdsec-cli/console_table.go | 48 +
 cmd/crowdsec-cli/dashboard.go | 336 +
 cmd/crowdsec-cli/decisions.go | 615 ++
 cmd/crowdsec-cli/decisions_table.go | 46 +
 cmd/crowdsec-cli/explain.go | 149 +
 cmd/crowdsec-cli/hub.go | 142 +
 cmd/crowdsec-cli/hubtest.go | 537 ++
 cmd/crowdsec-cli/hubtest_table.go | 80 +
 cmd/crowdsec-cli/lapi.go | 174 +
 cmd/crowdsec-cli/machines.go | 354 +
 cmd/crowdsec-cli/machines_table.go | 31 +
 cmd/crowdsec-cli/main.go | 242 +
 cmd/crowdsec-cli/main_test.go | 13 +
 cmd/crowdsec-cli/messages.go | 23 +
 cmd/crowdsec-cli/metrics.go | 289 +
 cmd/crowdsec-cli/metrics_table.go | 272 +
 cmd/crowdsec-cli/notifications.go | 332 +
 cmd/crowdsec-cli/notifications_table.go | 25 +
 cmd/crowdsec-cli/parsers.go | 174 +
 cmd/crowdsec-cli/postoverflows.go | 172 +
 cmd/crowdsec-cli/scenarios.go | 177 +
 cmd/crowdsec-cli/simulation.go | 251 +
 cmd/crowdsec-cli/support.go | 407 +
 cmd/crowdsec-cli/tables.go | 95 +
 cmd/crowdsec-cli/utils.go | 734 ++
 cmd/crowdsec-cli/utils_table.go | 66 +
 cmd/crowdsec/Makefile | 76 +
 cmd/crowdsec/api.go | 81 +
 cmd/crowdsec/crowdsec.go | 253 +
 cmd/crowdsec/event_log_hook_windows.go | 39 +
 cmd/crowdsec/main.go | 328 +
 cmd/crowdsec/main_test.go | 13 +
 cmd/crowdsec/metrics.go | 190 +
 cmd/crowdsec/output.go | 174 +
 cmd/crowdsec/parse.go | 53 +
 cmd/crowdsec/pour.go | 64 +
 cmd/crowdsec/run_in_svc.go | 71 +
 cmd/crowdsec/run_in_svc_windows.go | 99 +
 cmd/crowdsec/serve.go | 357 +
 cmd/crowdsec/win_service.go | 125 +
 cmd/crowdsec/win_service_install.go | 95 +
 cmd/crowdsec/win_service_manage.go | 64 +
 config/acquis.yaml | 16 +
 config/acquis_win.yaml | 8 +
 config/config.yaml | 63 +
 config/config_win.yaml | 49 +
 config/config_win_no_lapi.yaml | 28 +
 config/console.yaml | 3 +
 config/crowdsec.cron.daily | 14 +
 config/crowdsec.service | 14 +
 config/dev.yaml | 48 +
 config/local_api_credentials.yaml | 1 +
 config/online_api_credentials.yaml | 0
 config/patterns/aws | 11 +
 config/patterns/bacula | 50 +
 config/patterns/bro | 13 +
 config/patterns/cowrie_honeypot | 1 +
 config/patterns/exim | 12 +
 config/patterns/firewalls | 86 +
 config/patterns/haproxy | 39 +
 config/patterns/java | 20 +
 config/patterns/junos | 8 +
 config/patterns/linux-syslog | 16 +
 config/patterns/mcollective | 4 +
 config/patterns/modsecurity | 18 +
 config/patterns/mongodb | 7 +
 config/patterns/mysql | 1 +
 config/patterns/nagios | 124 +
 config/patterns/nginx | 19 +
 config/patterns/paths | 14 +
 config/patterns/postgresql | 2 +
 config/patterns/rails | 18 +
 config/patterns/redis | 21 +
 config/patterns/ruby | 2 +
 config/patterns/smb | 1 +
 config/patterns/ssh | 61 +
 config/patterns/tcpdump | 1 +
 config/profiles.yaml | 14 +
 config/simulation.yaml | 4 +
 config/user.yaml | 40 +
 debian/.gitignore | 7 +
 debian/README.md | 14 +
 debian/changelog | 87 +
 debian/compat | 1 +
 debian/control | 8 +
 debian/crowdsec.cron.daily | 1 +
 debian/crowdsec.service | 16 +
 debian/install | 12 +
 debian/patches/config_plugins | 13 +
 debian/patches/series | 1 +
 debian/postinst | 107 +
 debian/postrm | 4 +
 debian/preinst | 43 +
 debian/prerm | 9 +
 debian/rules | 53 +
 debian/templates | 23 +
 docker/README.md | 222 +
 docker/config.yaml | 56 +
 docker/docker_start.sh | 212 +
 go.mod | 177 +
 go.sum | 1180 +++
 make_chocolatey.ps1 | 18 +
 make_installer.ps1 | 20 +
 pkg/acquisition/acquisition.go | 272 +
 pkg/acquisition/acquisition_test.go | 546 ++
 .../configuration/configuration.go | 19 +
 .../modules/cloudwatch/cloudwatch.go | 686 ++
 .../modules/cloudwatch/cloudwatch_test.go | 808 ++
 pkg/acquisition/modules/docker/docker.go | 562 ++
 pkg/acquisition/modules/docker/docker_test.go | 321 +
 pkg/acquisition/modules/file/file.go | 493 ++
 pkg/acquisition/modules/file/file_test.go | 474 ++
 pkg/acquisition/modules/file/tailline.go | 7 +
 .../modules/file/tailline_windows.go | 9 +
 .../modules/file/test_files/bad.gz | 1 +
 .../modules/file/test_files/test.log | 5 +
 .../modules/file/test_files/test.log.gz | Bin 0 -> 39 bytes
 .../modules/journalctl/journalctl.go | 264 +
 .../modules/journalctl/journalctl_test.go | 273 +
 .../modules/journalctl/test_files/journalctl | 45 +
 pkg/acquisition/modules/kafka/kafka.go | 242 +
 pkg/acquisition/modules/kafka/kafka_test.go | 262 +
 .../testdata/kafkaClient.certificate.pem | 23 +
 .../modules/kafka/testdata/kafkaClient.key | 32 +
 .../modules/kafka/testdata/snakeoil-ca-1.crt | 23 +
 pkg/acquisition/modules/kinesis/kinesis.go | 515 ++
 .../modules/kinesis/kinesis_test.go | 334 +
 .../syslog/internal/parser/rfc3164/parse.go | 255 +
 .../internal/parser/rfc3164/parse_test.go | 368 +
 .../internal/parser/rfc3164/perf_test.go | 63 +
 .../syslog/internal/parser/rfc5424/parse.go | 398 +
 .../internal/parser/rfc5424/parse_test.go | 268 +
 .../internal/parser/rfc5424/perf_test.go | 104 +
 .../syslog/internal/parser/utils/utils.go | 76 +
 .../syslog/internal/server/syslogserver.go | 94 +
 pkg/acquisition/modules/syslog/syslog.go | 218 +
 pkg/acquisition/modules/syslog/syslog_test.go | 171 +
 .../modules/wineventlog/wineventlog.go | 59 +
 .../modules/wineventlog/wineventlog_test.go | 233 +
 .../wineventlog/wineventlog_windows.go | 320 +
 .../test_files/backward_compat.yaml | 15 +
 pkg/acquisition/test_files/bad_filetype.yaml | 5 +
 pkg/acquisition/test_files/bad_source.yaml | 4 +
 pkg/acquisition/test_files/badyaml.yaml | 1 +
 .../test_files/basic_filemode.yaml | 11 +
 pkg/acquisition/test_files/emptyitem.yaml | 1 +
 .../test_files/missing_labels.yaml | 2 +
 pkg/apiclient/alerts_service.go | 146 +
 pkg/apiclient/alerts_service_test.go | 494 ++
 pkg/apiclient/auth.go | 229 +
 pkg/apiclient/auth_service.go | 80 +
 pkg/apiclient/auth_service_test.go | 238 +
 pkg/apiclient/auth_test.go | 86 +
 pkg/apiclient/client.go | 179 +
 pkg/apiclient/client_http.go | 119 +
 pkg/apiclient/client_http_test.go | 76 +
 pkg/apiclient/client_test.go | 205 +
 pkg/apiclient/config.go | 17 +
 pkg/apiclient/decisions_service.go | 141 +
 pkg/apiclient/decisions_service_test.go | 341 +
 pkg/apiclient/heartbeat.go | 62 +
 pkg/apiclient/metrics.go | 27 +
 pkg/apiclient/signal.go | 35 +
 pkg/apiserver/alerts_test.go | 510 ++
 pkg/apiserver/api_key_test.go | 52 +
 pkg/apiserver/apic.go | 632 ++
 pkg/apiserver/apic_test.go | 944 +++
 pkg/apiserver/apiserver.go | 407 +
 pkg/apiserver/apiserver_test.go | 444 ++
 pkg/apiserver/controllers/controller.go | 150 +
 pkg/apiserver/controllers/v1/alerts.go | 299 +
 pkg/apiserver/controllers/v1/controller.go | 63 +
 pkg/apiserver/controllers/v1/decisions.go | 229 +
 pkg/apiserver/controllers/v1/errors.go | 38 +
 pkg/apiserver/controllers/v1/heartbeat.go | 21 +
 pkg/apiserver/controllers/v1/machines.go | 31 +
 pkg/apiserver/controllers/v1/metrics.go | 120 +
 pkg/apiserver/controllers/v1/utils.go | 26 +
 pkg/apiserver/decisions_test.go | 353 +
 pkg/apiserver/heartbeat_test.go | 18 +
 pkg/apiserver/jwt_test.go | 95 +
 pkg/apiserver/machines_test.go | 169 +
 pkg/apiserver/middlewares/v1/api_key.go | 226 +
 pkg/apiserver/middlewares/v1/jwt.go | 286 +
 pkg/apiserver/middlewares/v1/middlewares.go | 22 +
 pkg/apiserver/middlewares/v1/tls_auth.go | 257 +
 .../alertWithInvalidMachineID_sample.json | 59 +
 pkg/apiserver/tests/alert_bulk.json | 5362 +++++++++++++
 pkg/apiserver/tests/alert_duplicate.json | 266 +
 pkg/apiserver/tests/alert_minibulk+simul.json | 548 ++
 pkg/apiserver/tests/alert_minibulk.json | 548 ++
 pkg/apiserver/tests/alert_sample.json | 77 +
 pkg/apiserver/tests/alert_ssh-bf.json | 275 +
 pkg/apiserver/tests/alert_stream_fixture.json | 173 +
 pkg/apiserver/tests/invalidAlert_sample.json | 43 +
 pkg/apiserver/tests/profiles.yaml | 31 +
 pkg/apiserver/utils.go | 27 +
 pkg/csconfig/api.go | 250 +
 pkg/csconfig/api_test.go | 277 +
 pkg/csconfig/common.go | 47 +
 pkg/csconfig/common_test.go | 94 +
 pkg/csconfig/config.go | 145 +
 pkg/csconfig/config_paths.go | 58 +
 pkg/csconfig/config_test.go | 46 +
 pkg/csconfig/console.go | 83 +
 pkg/csconfig/crowdsec_service.go | 156 +
 pkg/csconfig/crowdsec_service_test.go | 193 +
 pkg/csconfig/cscli.go | 31 +
 pkg/csconfig/cscli_test.go | 84 +
 pkg/csconfig/database.go | 69 +
 pkg/csconfig/database_test.go | 65 +
 pkg/csconfig/hub.go | 24 +
 pkg/csconfig/hub_test.go | 94 +
 pkg/csconfig/plugin_config.go | 6 +
 pkg/csconfig/profiles.go | 57 +
 pkg/csconfig/prometheus.go | 21 +
 pkg/csconfig/prometheus_test.go | 55 +
 pkg/csconfig/simulation.go | 60 +
 pkg/csconfig/simulation_test.go | 140 +
 pkg/csconfig/tests/acquis.yaml | 0
 pkg/csconfig/tests/acquis/acquis.yaml | 0
 pkg/csconfig/tests/bad_lapi-secrets.yaml | 1 +
 .../tests/bad_online-api-secrets.yaml | 3 +
 pkg/csconfig/tests/config.yaml | 38 +
 pkg/csconfig/tests/lapi-secrets.yaml | 3 +
 pkg/csconfig/tests/online-api-secrets.yaml | 3 +
 pkg/csconfig/tests/profiles.yaml | 41 +
 pkg/csconfig/tests/simulation.yaml | 4 +
 pkg/csplugin/broker.go | 423 +
 pkg/csplugin/broker_test.go | 604 ++
 pkg/csplugin/broker_win_test.go | 262 +
 pkg/csplugin/hclog_adapter.go | 213 +
 pkg/csplugin/notifier.go | 59 +
 pkg/csplugin/tests/notifications/dummy.yaml | 22 +
 pkg/csplugin/utils.go | 137 +
 pkg/csplugin/utils_windows.go | 242 +
 pkg/csplugin/watcher.go | 163 +
 pkg/csplugin/watcher_test.go | 117 +
 pkg/csprofiles/csprofiles.go | 196 +
 pkg/csprofiles/csprofiles_test.go | 202 +
 pkg/cstest/filenotfound_unix.go | 5 +
 pkg/cstest/filenotfound_windows.go | 5 +
 pkg/cstest/utils.go | 30 +
 pkg/cwhub/cwhub.go | 368 +
 pkg/cwhub/cwhub_test.go | 427 +
 pkg/cwhub/download.go | 277 +
 pkg/cwhub/download_test.go | 42 +
 pkg/cwhub/helpers.go | 225 +
 pkg/cwhub/helpers_test.go | 158 +
 pkg/cwhub/install.go | 202 +
 pkg/cwhub/loader.go | 424 +
 pkg/cwhub/path_separator_windows.go | 23 +
 pkg/cwhub/pathseparator.go | 24 +
 pkg/cwhub/tests/collection_v1.yaml | 2 +
 pkg/cwhub/tests/collection_v2.yaml | 3 +
 pkg/cwhub/tests/foobar_parser.yaml | 8 +
 pkg/cwhub/tests/index1.json | 121 +
 pkg/cwhub/tests/index2.json | 146 +
 pkg/cwversion/version.go | 104 +
 pkg/database/alerts.go | 1144 +++
 pkg/database/bouncers.go | 95 +
 pkg/database/database.go | 190 +
 pkg/database/decisions.go | 652 ++
 pkg/database/ent/alert.go | 432 +
 pkg/database/ent/alert/alert.go | 167 +
 pkg/database/ent/alert/where.go | 2473 ++++++
 pkg/database/ent/alert_create.go | 875 +++
 pkg/database/ent/alert_delete.go | 115 +
 pkg/database/ent/alert_query.go | 839 ++
 pkg/database/ent/alert_update.go | 2404 ++++++
 pkg/database/ent/bouncer.go | 220 +
 pkg/database/ent/bouncer/bouncer.go | 83 +
 pkg/database/ent/bouncer/where.go | 1138 +++
 pkg/database/ent/bouncer_create.go | 473 ++
 pkg/database/ent/bouncer_delete.go | 115 +
 pkg/database/ent/bouncer_query.go | 529 ++
 pkg/database/ent/bouncer_update.go | 798 ++
 pkg/database/ent/client.go | 827 ++
 pkg/database/ent/config.go | 64 +
 pkg/database/ent/context.go | 33 +
 pkg/database/ent/decision.go | 297 +
 pkg/database/ent/decision/decision.go | 106 +
 pkg/database/ent/decision/where.go | 1373 ++++
 pkg/database/ent/decision_create.go | 554 ++
 pkg/database/ent/decision_delete.go | 115 +
 pkg/database/ent/decision_query.go | 613 ++
 pkg/database/ent/decision_update.go | 1201 +++
 pkg/database/ent/ent.go | 475 ++
 pkg/database/ent/enttest/enttest.go | 84 +
 pkg/database/ent/event.go | 182 +
 pkg/database/ent/event/event.go | 76 +
 pkg/database/ent/event/where.go | 489 ++
 pkg/database/ent/event_create.go | 347 +
 pkg/database/ent/event_delete.go | 115 +
 pkg/database/ent/event_query.go | 613 ++
 pkg/database/ent/event_update.go | 577 ++
 pkg/database/ent/generate.go | 4 +
 pkg/database/ent/hook/hook.go | 265 +
 pkg/database/ent/machine.go | 262 +
 pkg/database/ent/machine/machine.go | 101 +
 pkg/database/ent/machine/where.go | 1287 +++
 pkg/database/ent/machine_create.go | 535 ++
 pkg/database/ent/machine_delete.go | 115 +
 pkg/database/ent/machine_query.go | 609 ++
 pkg/database/ent/machine_update.go | 1061 +++
 pkg/database/ent/meta.go | 182 +
 pkg/database/ent/meta/meta.go | 76 +
 pkg/database/ent/meta/where.go | 524 ++
 pkg/database/ent/meta_create.go | 347 +
 pkg/database/ent/meta_delete.go | 115 +
 pkg/database/ent/meta_query.go | 613 ++
 pkg/database/ent/meta_update.go | 577 ++
 pkg/database/ent/migrate/migrate.go | 64 +
 pkg/database/ent/migrate/schema.go | 214 +
 pkg/database/ent/mutation.go | 6995 +++++++++++++++++
 pkg/database/ent/predicate/predicate.go | 25 +
 pkg/database/ent/runtime.go | 181 +
 pkg/database/ent/runtime/runtime.go | 10 +
 pkg/database/ent/schema/alert.go | 81 +
 pkg/database/ent/schema/bouncer.go | 39 +
 pkg/database/ent/schema/decision.go | 58 +
 pkg/database/ent/schema/event.go | 36 +
 pkg/database/ent/schema/machine.go | 47 +
 pkg/database/ent/schema/meta.go | 36 +
 pkg/database/ent/tx.go | 225 +
 pkg/database/errors.go | 22 +
 pkg/database/file_utils.go | 12 +
 pkg/database/file_utils_windows.go | 79 +
 pkg/database/machines.go | 185 +
 pkg/database/utils.go | 65 +
 pkg/exprhelpers/exprlib.go | 292 +
 pkg/exprhelpers/exprlib_test.go | 973 +++
 pkg/exprhelpers/jsonextract.go | 138 +
 pkg/exprhelpers/jsonextract_test.go | 251 +
 pkg/exprhelpers/tests/test_data.txt | 3 +
 pkg/exprhelpers/tests/test_data_no_type.txt | 3 +
 pkg/exprhelpers/tests/test_data_re.txt | 2 +
 pkg/exprhelpers/tests/test_empty_line.txt | 12 +
 pkg/exprhelpers/visitor.go | 136 +
 pkg/exprhelpers/xml.go | 64 +
 pkg/exprhelpers/xml_test.go | 115 +
 pkg/hubtest/coverage.go | 177 +
 pkg/hubtest/hubtest.go | 113 +
 pkg/hubtest/hubtest_item.go | 627 ++
 pkg/hubtest/parser_assert.go | 464 ++
 pkg/hubtest/scenario_assert.go | 273 +
 pkg/hubtest/utils.go | 107 +
 pkg/hubtest/utils_test.go | 18 +
 pkg/leakybucket/README.md | 142 +
 pkg/leakybucket/blackhole.go | 68 +
 pkg/leakybucket/bucket.go | 366 +
 pkg/leakybucket/buckets.go | 29 +
 pkg/leakybucket/buckets_test.go | 309 +
 pkg/leakybucket/manager_load.go | 412 +
 pkg/leakybucket/manager_load_test.go | 121 +
 pkg/leakybucket/manager_run.go | 358 +
 pkg/leakybucket/manager_run_test.go | 182 +
 pkg/leakybucket/overflow_filter.go | 62 +
 pkg/leakybucket/overflows.go | 322 +
 pkg/leakybucket/processor.go | 29 +
 pkg/leakybucket/queue.go | 42 +
 pkg/leakybucket/reset_filter.go | 109 +
 .../tests/leaky-fixedqueue/bucket.yaml | 12 +
 .../tests/leaky-fixedqueue/scenarios.yaml | 2 +
 .../tests/leaky-fixedqueue/test.json | 98 +
 .../leaky-scope-range-expression/bucket.yaml | 14 +
 .../scenarios.yaml | 1 +
 .../leaky-scope-range-expression/test.json | 47 +
 .../bucket.yaml | 9 +
 .../scenarios.yaml | 2 +
 .../test.json | 81 +
 .../tests/overflow-with-meta/bucket.yaml | 9 +
 .../tests/overflow-with-meta/scenarios.yaml | 2 +
 .../tests/overflow-with-meta/test.json | 79 +
 .../tests/simple-counter-bh/bucket.yaml | 11 +
 .../tests/simple-counter-bh/scenarios.yaml | 2 +
 .../tests/simple-counter-bh/test.json | 35 +
 .../tests/simple-counter-timeout/bucket.yaml | 10 +
 .../simple-counter-timeout/scenarios.yaml | 2 +
 .../tests/simple-counter-timeout/test.json | 30 +
 .../tests/simple-counter/bucket.yaml | 10 +
 .../tests/simple-counter/scenarios.yaml | 2 +
 .../tests/simple-counter/test.json | 46 +
 .../tests/simple-leaky-blackhole/bucket.yaml | 13 +
 .../simple-leaky-blackhole/scenarios.yaml | 2 +
 .../tests/simple-leaky-blackhole/test.json | 123 +
 .../tests/simple-leaky-cancel_on/bucket.yaml | 13 +
 .../simple-leaky-cancel_on/scenarios.yaml | 2 +
 .../tests/simple-leaky-cancel_on/test.json | 117 +
 .../tests/simple-leaky-overflow/bucket.yaml | 11 +
 .../simple-leaky-overflow/scenarios.yaml | 2 +
 .../tests/simple-leaky-overflow/test.json | 46 +
 .../simple-leaky-ovflwfilter/bucket.yaml | 27 +
 .../simple-leaky-ovflwfilter/scenarios.yaml | 2 +
 .../tests/simple-leaky-ovflwfilter/test.json | 54 +
 .../tests/simple-leaky-underflow/bucket.yaml | 12 +
 .../simple-leaky-underflow/scenarios.yaml | 2 +
 .../tests/simple-leaky-underflow/test.json | 22 +
 .../simple-leaky-uniq-cachesize/bucket.yaml | 14 +
 .../scenarios.yaml | 2 +
 .../simple-leaky-uniq-cachesize/test.json | 194 +
 .../bucket.yaml | 13 +
 .../in-buckets_state.json | 113 +
 .../scenarios.yaml | 2 +
 .../test.json | 63 +
 .../tests/simple-leaky-uniq/bucket.yaml | 13 +
 .../tests/simple-leaky-uniq/scenarios.yaml | 2 +
 .../tests/simple-leaky-uniq/test.json | 63 +
 .../simple-trigger-external-data/bucket.yaml | 13 +
 .../scenarios.yaml | 2 +
 .../simple_patterns.txt | 3 +
 .../simple-trigger-external-data/test.json | 55 +
 .../simple-trigger-reprocess/bucket.yaml | 10 +
 .../simple-trigger-reprocess/reprocess.yaml | 9 +
 .../simple-trigger-reprocess/scenarios.yaml | 3 +
 .../tests/simple-trigger-reprocess/test.json | 52 +
 .../tests/simple-trigger/bucket.yaml | 9 +
 .../tests/simple-trigger/scenarios.yaml | 2 +
 .../tests/simple-trigger/test.json | 35 +
 pkg/leakybucket/timemachine.go | 53 +
 pkg/leakybucket/trigger.go | 42 +
 pkg/leakybucket/uniq.go | 92 +
 pkg/metabase/api.go | 84 +
 pkg/metabase/container.go | 186 +
 pkg/metabase/database.go | 101 +
 pkg/metabase/metabase.go | 383 +
 pkg/models/add_alerts_request.go | 73 +
 pkg/models/add_alerts_response.go | 27 +
 pkg/models/add_signals_request.go | 75 +
 pkg/models/add_signals_request_item.go | 232 +
 pkg/models/alert.go | 493 ++
 pkg/models/decision.go | 198 +
 pkg/models/decisions_stream_response.go | 142 +
 pkg/models/delete_alerts_response.go | 50 +
 pkg/models/delete_decision_response.go | 50 +
 pkg/models/error_response.go | 76 +
 pkg/models/event.go | 120 +
 pkg/models/flush_decision_response.go | 43 +
 pkg/models/get_alerts_response.go | 73 +
 pkg/models/get_decisions_response.go | 73 +
 pkg/models/helpers.go | 76 +
 pkg/models/localapi_swagger.yaml | 1049 +++
 pkg/models/meta.go | 115 +
 pkg/models/metrics.go | 195 +
 pkg/models/metrics_agent_info.go | 61 +
 pkg/models/metrics_bouncer_info.go | 61 +
 pkg/models/source.go | 109 +
 pkg/models/topx_response.go | 110 +
 pkg/models/watcher_auth_request.go | 96 +
 pkg/models/watcher_auth_response.go | 58 +
 pkg/models/watcher_registration_request.go | 93 +
 pkg/parser/README.md | 181 +
 pkg/parser/enrich.go | 71 +
 pkg/parser/enrich_date.go | 91 +
 pkg/parser/enrich_date_test.go | 66 +
 pkg/parser/enrich_dns.go | 31 +
 pkg/parser/enrich_geoip.go | 130 +
 pkg/parser/node.go | 557 ++
 pkg/parser/node_test.go | 69 +
 pkg/parser/parsing_test.go | 416 +
 pkg/parser/runtime.go | 353 +
 pkg/parser/stage.go | 135 +
 pkg/parser/test_data/GeoLite2-ASN.mmdb | Bin 0 -> 3168 bytes
 pkg/parser/test_data/GeoLite2-City.mmdb | Bin 0 -> 20813 bytes
 .../tests/base-grok-expression/base-grok.yaml | 13 +
 .../tests/base-grok-expression/parsers.yaml | 2 +
 .../tests/base-grok-expression/test.yaml | 28 +
 .../base-grok-external-data/base-grok.yaml | 23 +
 .../base-grok-external-data/parsers.yaml | 2 +
 .../tests/base-grok-external-data/test.yaml | 32 +
 .../tests/base-grok-import/base-grok.yaml | 16 +
 .../tests/base-grok-import/parsers.yaml | 2 +
 pkg/parser/tests/base-grok-import/test.yaml | 43 +
 .../tests/base-grok-no-subnode/base-grok.yaml | 13 +
 .../tests/base-grok-no-subnode/parsers.yaml | 2 +
 .../tests/base-grok-no-subnode/test.yaml | 29 +
 pkg/parser/tests/base-grok/base-grok.yaml | 14 +
 pkg/parser/tests/base-grok/parsers.yaml | 2 +
 pkg/parser/tests/base-grok/test.yaml | 29 +
 .../tests/base-json-extract/base-grok.yaml | 17 +
 .../tests/base-json-extract/base-grok2.yaml | 16 +
 .../tests/base-json-extract/parsers.yaml | 4 +
 pkg/parser/tests/base-json-extract/test.yaml | 19 +
 pkg/parser/tests/base-tree/base-grok.yaml | 33 +
 pkg/parser/tests/base-tree/parsers.yaml | 2 +
 pkg/parser/tests/base-tree/test.yaml | 30 +
 .../tests/dateparser-enrich/base-grok.yaml | 10 +
 .../tests/dateparser-enrich/parsers.yaml | 2 +
 pkg/parser/tests/dateparser-enrich/test.yaml | 22 +
 pkg/parser/tests/geoip-enrich/base-grok.yaml | 22 +
 pkg/parser/tests/geoip-enrich/parsers.yaml | 2 +
 pkg/parser/tests/geoip-enrich/test.yaml | 27 +
 .../tests/multi-stage-grok/base-grok-s00.yaml | 12 +
 .../tests/multi-stage-grok/base-grok-s01.yaml | 11 +
 .../tests/multi-stage-grok/parsers.yaml | 4 +
 pkg/parser/tests/multi-stage-grok/test.yaml | 29 +
 .../tests/reverse-dns-enrich/base-grok.yaml | 8 +
 .../tests/reverse-dns-enrich/parsers.yaml | 2 +
 pkg/parser/tests/reverse-dns-enrich/test.yaml | 21 +
 pkg/parser/tests/sample_strings.txt | 3 +
 .../tests/whitelist-base/base-grok.yaml | 14 +
 pkg/parser/tests/whitelist-base/parsers.yaml | 2 +
 pkg/parser/tests/whitelist-base/test.yaml | 53 +
 pkg/parser/unix_parser.go | 109 +
 pkg/parser/whitelist.go | 23 +
 pkg/protobufs/README.md | 8 +
 pkg/protobufs/notifier.pb.go | 395 +
 pkg/protobufs/notifier.proto | 19 +
 pkg/protobufs/plugin_interface.go | 47 +
 pkg/time/AUTHORS | 3 +
 pkg/time/CONTRIBUTING.md | 26 +
 pkg/time/CONTRIBUTORS | 3 +
 pkg/time/LICENSE | 27 +
 pkg/time/PATENTS | 22 +
 pkg/time/README.md | 17 +
 pkg/time/rate/rate.go | 476 ++
 pkg/time/rate/rate_test.go | 483 ++
 pkg/types/constants.go | 5 +
 pkg/types/dataset.go | 74 +
 pkg/types/dataset_test.go | 43 +
 pkg/types/event.go | 108 +
 pkg/types/grok_pattern.go | 41 +
 pkg/types/ip.go | 108 +
 pkg/types/ip_test.go | 220 +
 pkg/types/line.go | 12 +
 pkg/types/profile.go | 25 +
 pkg/types/utils.go | 268 +
 pkg/yamlpatch/merge.go | 168 +
 pkg/yamlpatch/merge_test.go | 238 +
 pkg/yamlpatch/patcher.go | 154 +
 pkg/yamlpatch/patcher_test.go | 313 +
 pkg/yamlpatch/testdata/base.yaml | 13 +
 pkg/yamlpatch/testdata/expect.yaml | 13 +
 pkg/yamlpatch/testdata/production.yaml | 13 +
 platform/freebsd.mk | 6 +
 platform/linux.mk | 5 +
 platform/openbsd.mk | 6 +
 platform/unix_common.mk | 22 +
 platform/windows.mk | 35 +
 plugins/notifications/dummy/LICENSE | 21 +
 plugins/notifications/dummy/Makefile | 20 +
 plugins/notifications/dummy/dummy.yaml | 28 +
 plugins/notifications/dummy/main.go | 88 +
 plugins/notifications/email/LICENSE | 21 +
 plugins/notifications/email/Makefile | 20 +
 plugins/notifications/email/email.yaml | 45 +
 plugins/notifications/email/go.mod | 27 +
 plugins/notifications/email/go.sum | 185 +
 plugins/notifications/email/main.go | 149 +
 plugins/notifications/http/LICENSE | 21 +
 plugins/notifications/http/Makefile | 20 +
 plugins/notifications/http/go.mod | 26 +
 plugins/notifications/http/go.sum | 183 +
 plugins/notifications/http/http.yaml | 36 +
 plugins/notifications/http/main.go | 115 +
 plugins/notifications/slack/LICENSE | 21 +
 plugins/notifications/slack/Makefile | 20 +
 plugins/notifications/slack/go.mod | 29 +
 plugins/notifications/slack/go.sum | 192 +
 plugins/notifications/slack/main.go | 81 +
 plugins/notifications/slack/slack.yaml | 36 +
 plugins/notifications/splunk/LICENSE | 21 +
 plugins/notifications/splunk/Makefile | 20 +
 plugins/notifications/splunk/go.mod | 26 +
 plugins/notifications/splunk/go.sum | 183 +
 plugins/notifications/splunk/main.go | 119 +
 plugins/notifications/splunk/splunk.yaml | 28 +
 rpm/SOURCES/80-crowdsec.preset | 3 +
 rpm/SOURCES/crowdsec.unit.patch | 13 +
 rpm/SOURCES/user.patch | 11 +
 rpm/SPECS/crowdsec.spec | 237 +
 scripts/check_go_version.ps1 | 19 +
 scripts/test_env.ps1 | 90 +
 scripts/test_env.sh | 122 +
 scripts/test_wizard_upgrade.sh | 359 +
 tests/.gitignore | 4 +
 tests/README.md | 426 +
 tests/ansible/.gitignore | 2 +
 tests/ansible/README.md | 164 +
 tests/ansible/ansible.cfg | 15 +
 tests/ansible/env/example.sh | 51 +
 tests/ansible/env/pkg-sqlite.sh | 17 +
 tests/ansible/env/source-mysql.sh | 14 +
 tests/ansible/env/source-pgx.sh | 14 +
 tests/ansible/env/source-postgres.sh | 14 +
 tests/ansible/env/source-sqlite.sh | 14 +
 tests/ansible/install_binary_package.yml | 112 +
 tests/ansible/prepare-run | 49 +
 tests/ansible/prepare_tests.yml | 21 +
 tests/ansible/provision_dependencies.yml | 43 +
 tests/ansible/provision_test_suite.yml | 34 +
 tests/ansible/requirements.yml | 18 +
 .../ansible/roles/make_fixture/tasks/main.yml | 77 +
 .../ansible/roles/make_fixture/vars/main.yml | 3 +
 .../roles/run_func_tests/tasks/main.yml | 104 +
 .../roles/run_func_tests/vars/main.yml | 4 +
 tests/ansible/run_all.yml | 8 +
 tests/ansible/run_tests.yml | 20 +
 tests/ansible/vagrant/alma-8/Vagrantfile | 8 +
 tests/ansible/vagrant/alma-9/Vagrantfile | 8 +
 tests/ansible/vagrant/centos-7/Vagrantfile | 8 +
 tests/ansible/vagrant/centos-7/skip | 11 +
 tests/ansible/vagrant/centos-8/Vagrantfile | 8 +
 tests/ansible/vagrant/centos-9/Vagrantfile | 8 +
 tests/ansible/vagrant/common | 46 +
 .../vagrant/debian-10-buster/Vagrantfile | 8 +
 .../vagrant/debian-11-bullseye/Vagrantfile | 8 +
 .../vagrant/debian-9-stretch/Vagrantfile | 8 +
 tests/ansible/vagrant/debian-9-stretch/skip | 11 +
 .../vagrant/debian-testing/Vagrantfile | 8 +
 .../experimental/alpine-3.16/Vagrantfile | 20 +
 .../experimental/alpine-3.16/bootstrap | 7 +
 .../vagrant/experimental/alpine-3.16/skip | 9 +
 .../experimental/amazon-linux-2/Vagrantfile | 18 +
 .../experimental/amazon-linux-2/issues.txt | 3 +
 .../vagrant/experimental/arch/Vagrantfile | 18 +
 .../vagrant/experimental/devuan-3/Vagrantfile | 18 +
 .../vagrant/experimental/devuan-3/skip | 9 +
 .../experimental/dragonflybsd-6/Vagrantfile | 18 +
 .../vagrant/experimental/gentoo/Vagrantfile | 20 +
 .../vagrant/experimental/gentoo/bootstrap | 3 +
 .../experimental/hardenedbsd-13/Vagrantfile | 20 +
 .../experimental/hardenedbsd-13/bootstrap | 5 +
 .../vagrant/experimental/hardenedbsd-13/skip | 9 +
 .../vagrant/experimental/netbsd-9/Vagrantfile | 20 +
 .../experimental/openbsd-7/Vagrantfile | 20 +
 .../vagrant/experimental/openbsd-7/bootstrap | 6 +
 .../vagrant/experimental/openbsd-7/skip | 9 +
 .../experimental/opensuse-15.4/Vagrantfile | 20 +
 .../ubuntu-14.04-trusty/Vagrantfile | 20 +
 tests/ansible/vagrant/fedora-33/Vagrantfile | 9 +
 tests/ansible/vagrant/fedora-33/skip | 9 +
 tests/ansible/vagrant/fedora-34/Vagrantfile | 9 +
 tests/ansible/vagrant/fedora-34/skip | 9 +
 tests/ansible/vagrant/fedora-35/Vagrantfile | 9 +
 tests/ansible/vagrant/fedora-35/skip | 9 +
 tests/ansible/vagrant/fedora-36/Vagrantfile | 9 +
 tests/ansible/vagrant/fedora-36/skip | 9 +
 tests/ansible/vagrant/freebsd-12/Vagrantfile | 8 +
 tests/ansible/vagrant/freebsd-12/skip | 12 +
 tests/ansible/vagrant/freebsd-13/Vagrantfile | 8 +
 tests/ansible/vagrant/freebsd-13/skip | 12 +
 tests/ansible/vagrant/oracle-7/Vagrantfile | 8 +
 tests/ansible/vagrant/oracle-7/skip | 11 +
 tests/ansible/vagrant/oracle-8/Vagrantfile | 8 +
 tests/ansible/vagrant/oracle-9/Vagrantfile | 8 +
 tests/ansible/vagrant/rocky-8/Vagrantfile | 8 +
 tests/ansible/vagrant/rocky-9/Vagrantfile | 8 +
 .../vagrant/ubuntu-16.04-xenial/Vagrantfile | 8 +
 .../ansible/vagrant/ubuntu-16.04-xenial/skip | 11 +
 .../vagrant/ubuntu-18.04-bionic/Vagrantfile | 9 +
 .../vagrant/ubuntu-20.04-focal/Vagrantfile | 8 +
 .../vagrant/ubuntu-22.04-jammy/Vagrantfile | 8 +
 tests/ansible/vars/go.yml | 5 +
 tests/ansible/vars/mysql.yml | 5 +
 tests/ansible/vars/postgres.yml | 30 +
 tests/bats.mk | 118 +
 tests/bats/01_base.bats | 271 +
 tests/bats/01_crowdsec.bats | 181 +
 tests/bats/02_nolapi.bats | 90 +
 tests/bats/03_noagent.bats | 75 +
 tests/bats/04_capi.bats | 67 +
 tests/bats/04_nocapi.bats | 81 +
 tests/bats/05_config_yaml_local.bats | 143 +
 tests/bats/10_bouncers.bats | 58 +
 tests/bats/11_bouncers_tls.bats | 97 +
 tests/bats/20_collections.bats | 114 +
 tests/bats/30_machines.bats | 83 +
 tests/bats/30_machines_tls.bats | 130 +
 tests/bats/40_cold-logs.bats | 63 +
 tests/bats/40_live-ban.bats | 45 +
 tests/bats/50_simulation.bats | 66 +
 tests/bats/70_http_plugin.bats | 86 +
 tests/bats/71_dummy_plugin.bats | 78 +
 tests/bats/72_plugin_badconfig.bats | 110 +
 tests/bats/80_alerts.bats | 193 +
 tests/bats/90_decisions.bats | 67 +
 tests/bats/97_ipv4_single.bats | 107 +
 tests/bats/97_ipv6_single.bats | 155 +
 tests/bats/98_ipv4_range.bats | 134 +
 tests/bats/98_ipv6_range.bats | 217 +
 tests/bats/99_lapi-stream-mode-scenario.bats | 233 +
 tests/bats/99_lapi-stream-mode-scopes.bats | 64 +
 tests/bats/99_lapi-stream-mode.bats | 73 +
 tests/bats/reformat | 17 +
 tests/bats/testdata/cfssl/agent.json | 16 +
 tests/bats/testdata/cfssl/agent_invalid.json | 16 +
 tests/bats/testdata/cfssl/bouncer.json | 16 +
 .../bats/testdata/cfssl/bouncer_invalid.json | 16 +
 tests/bats/testdata/cfssl/ca.json | 16 +
 tests/bats/testdata/cfssl/intermediate.json | 19 +
 tests/bats/testdata/cfssl/profiles.json | 44 +
 tests/bats/testdata/cfssl/server.json | 20 +
 tests/bin/assert-crowdsec-not-running | 25 +
 tests/bin/check-requirements | 110 +
 tests/bin/collect-hub-coverage | 32 +
 tests/bin/crowdsec-wrapper | 49 +
 tests/bin/cscli-wrapper | 40 +
 tests/bin/generate-hub-tests | 50 +
 tests/bin/mock-http.py | 47 +
 tests/bin/wait-for-port | 42 +
 tests/disable-capi | 8 +
 tests/dyn-bats/README.md | 2 +
 tests/enable-capi | 11 +
 tests/instance-crowdsec | 16 +
 tests/instance-data | 16 +
 tests/instance-db | 18 +
 tests/instance-mock-http | 67 +
 tests/lib/config/config-global | 119 +
 tests/lib/config/config-local | 174 +
 tests/lib/db/instance-mysql | 124 +
 tests/lib/db/instance-pgx | 1 +
 tests/lib/db/instance-postgres | 101 +
 tests/lib/db/instance-sqlite | 87 +
 tests/lib/init/crowdsec-daemon | 80 +
 tests/lib/init/crowdsec-systemd | 67 +
 tests/lib/setup.sh | 11 +
 tests/lib/setup_file.sh | 213 +
 tests/lib/teardown_file.sh | 8 +
 tests/localstack/docker-compose.yml | 83 +
 tests/run-tests | 54 +
 windows/Chocolatey/crowdsec/ReadMe.md | 133 +
 windows/Chocolatey/crowdsec/crowdsec.nuspec | 45 +
 windows/Chocolatey/crowdsec/tools/LICENSE.txt | 26 +
 .../crowdsec/tools/VERIFICATION.txt | 9 +
 .../crowdsec/tools/chocolateybeforemodify.ps1 | 1 +
 .../crowdsec/tools/chocolateyinstall.ps1 | 29 +
 .../crowdsec/tools/chocolateyuninstall.ps1 | 30 +
 windows/README.md | 51 +
 windows/install_dev_windows.ps1 | 6 +
 windows/install_installer_windows.ps1 | 2 +
 windows/installer/WixUI_HK.wxs | 65 +
 windows/installer/crowdsec_icon.ico | Bin 0 -> 4286 bytes
 windows/installer/crowdsec_msi_top_banner.bmp | Bin 0 -> 114514 bytes
 windows/installer/installer_dialog.bmp | Bin 0 -> 155914 bytes
 windows/installer/product.wxs | 183 +
 wizard.sh | 816 ++
 783 files changed, 112586 insertions(+)
 create mode 100644 .dockerignore
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yaml
 create mode 100644 .github/release-drafter.yml
 create mode 120000 .github/workflows/.yamllint
 create mode 100644 .github/workflows/bats-hub.yml
 create mode 100644 .github/workflows/bats-mysql.yml
 create mode 100644 .github/workflows/bats-postgres.yml
 create mode 100644 .github/workflows/bats-sqlite-coverage.yml
 create mode 100644 .github/workflows/bats.yml
 create mode 100644 .github/workflows/ci-windows-build-msi.yml
 create mode 100644 .github/workflows/ci_golangci-lint.yml
 create mode 100644 .github/workflows/ci_release-drafter.yml
 create mode 100644 .github/workflows/codeql-analysis.yml
 create mode 100644 .github/workflows/dispatch_ci_hub.yaml
 create mode 100644 .github/workflows/dispatch_create_branch_hub.yaml
 create mode 100644 .github/workflows/dispatch_delete_branch_hub.yaml
 create mode 100644 .github/workflows/go-tests-windows.yml
 create mode 100644 .github/workflows/go-tests.yml
 create mode 100644 .github/workflows/release_publish-package.yml
 create mode 100644 .github/workflows/release_publish_docker-image-debian.yml
 create mode 100644 .github/workflows/release_publish_docker-image.yml
 create mode 100644 .github/workflows/update_docker_hub_doc.yml
 create mode 100644 .gitignore
 create mode 100644 .gitmodules
 create mode 100644 .golangci.yml
 create mode 100644 .yamllint
 create mode 100644 CONTRIBUTING.md
 create mode 100644 Dockerfile
 create mode 100644 Dockerfile.debian
 create mode 100644 LICENSE
 create mode 100644 Makefile
 create mode 100644 README.md
 create mode 100644 SECURITY.md
 create mode 100644 azure-pipelines.yml
 create mode 100644 cmd/crowdsec-cli/Makefile
 create mode 100644 cmd/crowdsec-cli/alerts.go
 create mode 100644 cmd/crowdsec-cli/alerts_table.go
 create mode 100644 cmd/crowdsec-cli/bouncers.go
 create mode 100644 cmd/crowdsec-cli/bouncers_table.go
 create mode 100644 cmd/crowdsec-cli/capi.go
 create mode 100644 cmd/crowdsec-cli/collections.go
 create mode 100644 cmd/crowdsec-cli/completion.go
 create mode 100644 cmd/crowdsec-cli/config.go
 create mode 100644 cmd/crowdsec-cli/console.go
 create mode 100644 cmd/crowdsec-cli/console_table.go
 create mode 100644 cmd/crowdsec-cli/dashboard.go
 create mode 100644 cmd/crowdsec-cli/decisions.go
 create mode 100644 cmd/crowdsec-cli/decisions_table.go
 create mode 100644 cmd/crowdsec-cli/explain.go
 create mode 100644 cmd/crowdsec-cli/hub.go
 create mode 100644 cmd/crowdsec-cli/hubtest.go
 create mode 100644 cmd/crowdsec-cli/hubtest_table.go
 create mode 100644 cmd/crowdsec-cli/lapi.go
 create mode 100644 cmd/crowdsec-cli/machines.go
 create mode 100644 cmd/crowdsec-cli/machines_table.go
 create mode 100644 cmd/crowdsec-cli/main.go
 create mode 100644 cmd/crowdsec-cli/main_test.go
 create mode 100644 cmd/crowdsec-cli/messages.go
 create mode 100644 cmd/crowdsec-cli/metrics.go
 create mode 100644 cmd/crowdsec-cli/metrics_table.go
 create mode 100644 cmd/crowdsec-cli/notifications.go
 create mode 100644 cmd/crowdsec-cli/notifications_table.go
 create mode 100644 cmd/crowdsec-cli/parsers.go
 create mode 100644 cmd/crowdsec-cli/postoverflows.go
 create mode 100644 cmd/crowdsec-cli/scenarios.go
 create mode 100644 cmd/crowdsec-cli/simulation.go
 create mode 100644 cmd/crowdsec-cli/support.go
 create mode 100644 cmd/crowdsec-cli/tables.go
 create mode 100644 cmd/crowdsec-cli/utils.go
 create mode 100644 cmd/crowdsec-cli/utils_table.go
 create mode 100644 cmd/crowdsec/Makefile
 create mode 100644 cmd/crowdsec/api.go
 create mode 100644 cmd/crowdsec/crowdsec.go
 create mode 100644 cmd/crowdsec/event_log_hook_windows.go
 create mode 100644 cmd/crowdsec/main.go
 create mode 100644 cmd/crowdsec/main_test.go
 create mode 100644 cmd/crowdsec/metrics.go
 create mode 100644 cmd/crowdsec/output.go
 create mode 100644 cmd/crowdsec/parse.go
 create mode 100644 cmd/crowdsec/pour.go
 create mode 100644 cmd/crowdsec/run_in_svc.go
 create mode 100644 cmd/crowdsec/run_in_svc_windows.go
 create mode 100644 cmd/crowdsec/serve.go
 create mode 100644 cmd/crowdsec/win_service.go
 create mode 100644 cmd/crowdsec/win_service_install.go
 create mode 100644 cmd/crowdsec/win_service_manage.go
 create mode 100644 config/acquis.yaml
 create mode 100644 config/acquis_win.yaml
 create mode 100644 config/config.yaml
 create mode 100644 config/config_win.yaml
 create mode 100644 config/config_win_no_lapi.yaml
 create mode 100644 config/console.yaml
 create mode 100644 config/crowdsec.cron.daily
 create mode 100644 config/crowdsec.service
 create mode 100644 config/dev.yaml
 create mode 100644 config/local_api_credentials.yaml
 create mode 100644 config/online_api_credentials.yaml
 create mode 100644 config/patterns/aws
 create mode 100644 config/patterns/bacula
 create mode 100644 config/patterns/bro
 create mode 100644 config/patterns/cowrie_honeypot
 create mode 100644 config/patterns/exim
 create mode 100644 config/patterns/firewalls
 create mode 100644 config/patterns/haproxy
 create mode 100644 config/patterns/java
 create mode 100644 config/patterns/junos
 create mode 100644 config/patterns/linux-syslog
 create mode 100644 config/patterns/mcollective
 create mode 100644 config/patterns/modsecurity
 create mode 100644 config/patterns/mongodb
 create mode 100644 config/patterns/mysql
 create mode 100644 config/patterns/nagios
 create mode 100644 config/patterns/nginx
 create mode 100644 config/patterns/paths
 create mode 100644 config/patterns/postgresql
 create mode 100644 config/patterns/rails
 create mode 100644 config/patterns/redis
 create mode 100644 config/patterns/ruby
 create mode 100644 config/patterns/smb
 create mode 100644 config/patterns/ssh
 create mode 100644 config/patterns/tcpdump
 create mode 100644 config/profiles.yaml
 create mode 100644 config/simulation.yaml
 create mode 100644 config/user.yaml
 create mode 100644 debian/.gitignore
 create mode 100644 debian/README.md
 create mode 100644 debian/changelog
 create mode 100644 debian/compat
 create mode 100644 debian/control
 create mode 120000 debian/crowdsec.cron.daily
 create mode 100644 debian/crowdsec.service
 create mode 100644 debian/install
 create mode 100644 debian/patches/config_plugins
 create mode 100644 debian/patches/series
 create mode 100644 debian/postinst
 create mode 100644 debian/postrm
 create mode 100644 debian/preinst
 create mode 100644 debian/prerm
 create mode 100755 debian/rules
 create mode 100644 debian/templates
 create mode 100644 docker/README.md
 create mode 100644 docker/config.yaml
 create mode 100755 docker/docker_start.sh
 create mode 100644 go.mod
 create mode 100644 go.sum
 create mode 100644 make_chocolatey.ps1
 create mode 100644 make_installer.ps1
 create mode 100644 pkg/acquisition/acquisition.go
 create mode 100644 pkg/acquisition/acquisition_test.go
 create mode 100644 pkg/acquisition/configuration/configuration.go
 create mode 100644 pkg/acquisition/modules/cloudwatch/cloudwatch.go
 create mode 100644 pkg/acquisition/modules/cloudwatch/cloudwatch_test.go
 create mode 100644 pkg/acquisition/modules/docker/docker.go
 create mode 100644 pkg/acquisition/modules/docker/docker_test.go
 create mode 100644 pkg/acquisition/modules/file/file.go
 create mode 100644 pkg/acquisition/modules/file/file_test.go
 create mode 100644 pkg/acquisition/modules/file/tailline.go
 create mode 100644 pkg/acquisition/modules/file/tailline_windows.go
 create mode 100644 pkg/acquisition/modules/file/test_files/bad.gz
 create mode 100644 pkg/acquisition/modules/file/test_files/test.log
 create mode 100644 pkg/acquisition/modules/file/test_files/test.log.gz
 create mode 100644 pkg/acquisition/modules/journalctl/journalctl.go
 create mode 100644 pkg/acquisition/modules/journalctl/journalctl_test.go
 create mode 100755 pkg/acquisition/modules/journalctl/test_files/journalctl
 create mode 100644 pkg/acquisition/modules/kafka/kafka.go
 create mode 100644 pkg/acquisition/modules/kafka/kafka_test.go
 create mode 100644 pkg/acquisition/modules/kafka/testdata/kafkaClient.certificate.pem
 create mode 100644 pkg/acquisition/modules/kafka/testdata/kafkaClient.key
 create mode 100644 pkg/acquisition/modules/kafka/testdata/snakeoil-ca-1.crt
 create mode 100644 pkg/acquisition/modules/kinesis/kinesis.go
 create mode 100644 pkg/acquisition/modules/kinesis/kinesis_test.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/parser/utils/utils.go
 create mode 100644 pkg/acquisition/modules/syslog/internal/server/syslogserver.go
 create mode 100644 pkg/acquisition/modules/syslog/syslog.go
 create mode 100644 pkg/acquisition/modules/syslog/syslog_test.go
 create mode 100644 pkg/acquisition/modules/wineventlog/wineventlog.go
 create mode 100644 pkg/acquisition/modules/wineventlog/wineventlog_test.go
 create mode 100644 pkg/acquisition/modules/wineventlog/wineventlog_windows.go
 create mode 100644 pkg/acquisition/test_files/backward_compat.yaml
 create mode 100644 pkg/acquisition/test_files/bad_filetype.yaml
 create mode 100644 pkg/acquisition/test_files/bad_source.yaml
 create mode 100644 pkg/acquisition/test_files/badyaml.yaml
 create mode 100644 pkg/acquisition/test_files/basic_filemode.yaml
 create mode 100644 pkg/acquisition/test_files/emptyitem.yaml
 create mode 100644 pkg/acquisition/test_files/missing_labels.yaml
 create mode 100644 pkg/apiclient/alerts_service.go
 create mode 100644 pkg/apiclient/alerts_service_test.go
 create mode 100644 pkg/apiclient/auth.go
 create mode 100644 pkg/apiclient/auth_service.go
 create mode 100644 pkg/apiclient/auth_service_test.go
 create mode 100644 pkg/apiclient/auth_test.go
 create mode 100644 pkg/apiclient/client.go
 create mode 100644 pkg/apiclient/client_http.go
 create mode 100644 pkg/apiclient/client_http_test.go
 create mode 100644 pkg/apiclient/client_test.go
 create mode 100644 pkg/apiclient/config.go
 create mode 100644 pkg/apiclient/decisions_service.go
 create mode 100644 pkg/apiclient/decisions_service_test.go
 create mode 100644 pkg/apiclient/heartbeat.go
 create mode 100644 pkg/apiclient/metrics.go
 create mode 100644 pkg/apiclient/signal.go
 create mode 100644 pkg/apiserver/alerts_test.go
 create mode 100644 pkg/apiserver/api_key_test.go
 create mode 100644 pkg/apiserver/apic.go
 create mode 100644 pkg/apiserver/apic_test.go
 create mode 100644 pkg/apiserver/apiserver.go
 create mode 100644 pkg/apiserver/apiserver_test.go
 create mode 100644 pkg/apiserver/controllers/controller.go
 create mode 100644 pkg/apiserver/controllers/v1/alerts.go
 create mode 100644 pkg/apiserver/controllers/v1/controller.go
 create mode 100644 pkg/apiserver/controllers/v1/decisions.go
 create mode 100644 pkg/apiserver/controllers/v1/errors.go
 create mode 100644 pkg/apiserver/controllers/v1/heartbeat.go
 create mode 100644 pkg/apiserver/controllers/v1/machines.go
 create mode 100644 pkg/apiserver/controllers/v1/metrics.go
 create mode 100644 pkg/apiserver/controllers/v1/utils.go
 create mode 100644 pkg/apiserver/decisions_test.go
 create mode 100644 pkg/apiserver/heartbeat_test.go
 create mode 100644 pkg/apiserver/jwt_test.go
 create mode 100644 pkg/apiserver/machines_test.go
 create mode 100644 pkg/apiserver/middlewares/v1/api_key.go
 create mode 100644 pkg/apiserver/middlewares/v1/jwt.go
 create mode 100644 pkg/apiserver/middlewares/v1/middlewares.go
 create mode 100644 pkg/apiserver/middlewares/v1/tls_auth.go
 create mode 100644 pkg/apiserver/tests/alertWithInvalidMachineID_sample.json
 create mode 100644 pkg/apiserver/tests/alert_bulk.json
 create mode 100644 pkg/apiserver/tests/alert_duplicate.json
 create mode 100644 pkg/apiserver/tests/alert_minibulk+simul.json
 create mode 100644 pkg/apiserver/tests/alert_minibulk.json
 create mode 100644 pkg/apiserver/tests/alert_sample.json
 create mode 100644 pkg/apiserver/tests/alert_ssh-bf.json
 create mode 100644 pkg/apiserver/tests/alert_stream_fixture.json
 create mode 100644 pkg/apiserver/tests/invalidAlert_sample.json
 create mode 100644 pkg/apiserver/tests/profiles.yaml
 create mode 100644 pkg/apiserver/utils.go
 create mode 100644 pkg/csconfig/api.go
 create mode 100644 pkg/csconfig/api_test.go
 create mode 100644 pkg/csconfig/common.go
 create mode 100644 pkg/csconfig/common_test.go
 create mode 100644 pkg/csconfig/config.go
 create mode 100644 pkg/csconfig/config_paths.go
 create mode 100644 pkg/csconfig/config_test.go
 create mode 100644 pkg/csconfig/console.go
 create mode 100644 pkg/csconfig/crowdsec_service.go
 create mode 100644 pkg/csconfig/crowdsec_service_test.go
 create mode 100644 pkg/csconfig/cscli.go
 create mode 100644 pkg/csconfig/cscli_test.go
 create mode 100644 pkg/csconfig/database.go
 create mode 100644 pkg/csconfig/database_test.go
 create mode 100644 pkg/csconfig/hub.go
 create mode 100644 pkg/csconfig/hub_test.go
 create mode 100644 pkg/csconfig/plugin_config.go
 create mode 100644 pkg/csconfig/profiles.go
 create mode 100644 pkg/csconfig/prometheus.go
 create mode 100644 pkg/csconfig/prometheus_test.go
 create mode 100644 pkg/csconfig/simulation.go
 create mode 100644 pkg/csconfig/simulation_test.go
 create mode 100644 pkg/csconfig/tests/acquis.yaml
 create mode 100644 pkg/csconfig/tests/acquis/acquis.yaml
 create mode 100644 pkg/csconfig/tests/bad_lapi-secrets.yaml
 create mode 100644 pkg/csconfig/tests/bad_online-api-secrets.yaml
 create mode 100644 pkg/csconfig/tests/config.yaml
 create mode 100644 pkg/csconfig/tests/lapi-secrets.yaml
 create mode 100644 pkg/csconfig/tests/online-api-secrets.yaml
 create mode 100644 pkg/csconfig/tests/profiles.yaml
 create mode 100644 pkg/csconfig/tests/simulation.yaml
 create mode 100644 pkg/csplugin/broker.go
 create mode 100644 pkg/csplugin/broker_test.go
 create mode 100644 pkg/csplugin/broker_win_test.go
 create mode 100644 pkg/csplugin/hclog_adapter.go
 create mode 100644 pkg/csplugin/notifier.go
 create mode 100644 pkg/csplugin/tests/notifications/dummy.yaml
 create mode 100644 pkg/csplugin/utils.go
 create mode 100644 pkg/csplugin/utils_windows.go
 create mode 100644 pkg/csplugin/watcher.go
 create mode 100644 pkg/csplugin/watcher_test.go
 create mode 100644 pkg/csprofiles/csprofiles.go
 create mode 100644 pkg/csprofiles/csprofiles_test.go
 create mode 100644 pkg/cstest/filenotfound_unix.go
 create mode 100644 pkg/cstest/filenotfound_windows.go
 create mode 100644 pkg/cstest/utils.go
 create mode 100644 pkg/cwhub/cwhub.go
 create mode 100644 pkg/cwhub/cwhub_test.go
 create mode 100644 pkg/cwhub/download.go
 create mode 100644 pkg/cwhub/download_test.go
 create mode 100644 pkg/cwhub/helpers.go
 create mode 100644 pkg/cwhub/helpers_test.go
 create mode 100644 pkg/cwhub/install.go
 create mode 100644 pkg/cwhub/loader.go
 create mode 100644 pkg/cwhub/path_separator_windows.go
 create mode 100644 pkg/cwhub/pathseparator.go
 create mode 100644 pkg/cwhub/tests/collection_v1.yaml
 create mode 100644 pkg/cwhub/tests/collection_v2.yaml
 create mode 100644 pkg/cwhub/tests/foobar_parser.yaml
 create mode 100644 pkg/cwhub/tests/index1.json
 create mode 100644 pkg/cwhub/tests/index2.json
 create mode 100644 pkg/cwversion/version.go
 create mode 100644 pkg/database/alerts.go
 create mode 100644 pkg/database/bouncers.go
 create mode 100644 pkg/database/database.go
 create mode 100644 pkg/database/decisions.go
 create mode 100644 pkg/database/ent/alert.go
 create mode 100644 pkg/database/ent/alert/alert.go
 create mode 100644 pkg/database/ent/alert/where.go
 create mode 100644 pkg/database/ent/alert_create.go
 create mode 100644 pkg/database/ent/alert_delete.go
 create mode 100644 pkg/database/ent/alert_query.go
 create mode 100644 pkg/database/ent/alert_update.go
 create mode 100644 pkg/database/ent/bouncer.go
 create mode 100644 pkg/database/ent/bouncer/bouncer.go
 create mode 100644 pkg/database/ent/bouncer/where.go
 create mode 100644 pkg/database/ent/bouncer_create.go
 create mode 100644 pkg/database/ent/bouncer_delete.go
 create mode 100644 pkg/database/ent/bouncer_query.go
 create mode 100644 pkg/database/ent/bouncer_update.go
 create mode 100644 pkg/database/ent/client.go
 create mode 100644 pkg/database/ent/config.go
 create mode 100644 pkg/database/ent/context.go
 create mode 100644 pkg/database/ent/decision.go
 create mode 100644 pkg/database/ent/decision/decision.go
 create mode 100644 pkg/database/ent/decision/where.go
 create mode 100644 pkg/database/ent/decision_create.go
 create mode 100644 pkg/database/ent/decision_delete.go
 create mode 100644 pkg/database/ent/decision_query.go
 create mode 100644 pkg/database/ent/decision_update.go
 create mode 100644 pkg/database/ent/ent.go
 create mode 100644 pkg/database/ent/enttest/enttest.go
 create mode 100644 pkg/database/ent/event.go
 create mode 100644 pkg/database/ent/event/event.go
 create mode 100644 pkg/database/ent/event/where.go
 create mode 100644 pkg/database/ent/event_create.go
 create mode 100644 pkg/database/ent/event_delete.go
 create mode 100644 pkg/database/ent/event_query.go
 create mode 100644 pkg/database/ent/event_update.go
 create mode 100644 pkg/database/ent/generate.go
 create mode 100644 pkg/database/ent/hook/hook.go
 create mode 100644 pkg/database/ent/machine.go
 create mode 100644 pkg/database/ent/machine/machine.go
 create mode 100644 pkg/database/ent/machine/where.go
 create mode 100644 pkg/database/ent/machine_create.go
 create mode 100644 pkg/database/ent/machine_delete.go
 create mode 100644 pkg/database/ent/machine_query.go
 create mode 100644 pkg/database/ent/machine_update.go
 create mode 100644 pkg/database/ent/meta.go
 create mode 100644 pkg/database/ent/meta/meta.go
 create mode 100644 pkg/database/ent/meta/where.go
 create mode 100644 pkg/database/ent/meta_create.go
 create mode 100644 pkg/database/ent/meta_delete.go
 create mode 100644 pkg/database/ent/meta_query.go
 create mode 100644 pkg/database/ent/meta_update.go
 create mode 100644 pkg/database/ent/migrate/migrate.go
 create mode 100644 pkg/database/ent/migrate/schema.go
 create mode 100644 pkg/database/ent/mutation.go
 create mode 100644 pkg/database/ent/predicate/predicate.go
 create mode 100644 pkg/database/ent/runtime.go
 create mode 100644 pkg/database/ent/runtime/runtime.go
 create mode 100644 pkg/database/ent/schema/alert.go
 create mode 100644 pkg/database/ent/schema/bouncer.go
 create mode 100644 pkg/database/ent/schema/decision.go
 create mode 100644 pkg/database/ent/schema/event.go
 create mode 100644 pkg/database/ent/schema/machine.go
 create mode 100644 pkg/database/ent/schema/meta.go
 create mode 100644 pkg/database/ent/tx.go
 create mode 100644 pkg/database/errors.go
 create mode 100644 pkg/database/file_utils.go
 create mode 100644 pkg/database/file_utils_windows.go
 create mode 100644 pkg/database/machines.go
 create mode 100644 pkg/database/utils.go
 create mode 100644 pkg/exprhelpers/exprlib.go
 create mode 100644 pkg/exprhelpers/exprlib_test.go
 create mode 100644 pkg/exprhelpers/jsonextract.go
 create mode 100644 pkg/exprhelpers/jsonextract_test.go
 create mode 100644 pkg/exprhelpers/tests/test_data.txt
 create mode 100644 pkg/exprhelpers/tests/test_data_no_type.txt
 create mode 100644 pkg/exprhelpers/tests/test_data_re.txt
 create mode 100644 pkg/exprhelpers/tests/test_empty_line.txt
 create mode 100644 pkg/exprhelpers/visitor.go
 create mode 100644 pkg/exprhelpers/xml.go
 create mode 100644 pkg/exprhelpers/xml_test.go
 create mode 100644 pkg/hubtest/coverage.go
 create mode 100644 pkg/hubtest/hubtest.go
 create mode 100644 pkg/hubtest/hubtest_item.go
 create mode 100644 pkg/hubtest/parser_assert.go
 create mode 100644 pkg/hubtest/scenario_assert.go
 create mode 100644 pkg/hubtest/utils.go
 create mode 100644 pkg/hubtest/utils_test.go
 create mode 100644 pkg/leakybucket/README.md
 create mode 100644 pkg/leakybucket/blackhole.go
 create mode 100644 pkg/leakybucket/bucket.go
 create mode 100644 pkg/leakybucket/buckets.go
 create mode 100644 pkg/leakybucket/buckets_test.go
 create mode 100644 pkg/leakybucket/manager_load.go
 create mode 100644 pkg/leakybucket/manager_load_test.go
 create mode 100644 pkg/leakybucket/manager_run.go
 create mode 100644 pkg/leakybucket/manager_run_test.go
 create mode 100644 pkg/leakybucket/overflow_filter.go
 create mode 100644 pkg/leakybucket/overflows.go
 create mode 100644 pkg/leakybucket/processor.go
 create mode 100644 pkg/leakybucket/queue.go
 create mode 100644 pkg/leakybucket/reset_filter.go
 create mode 100644 pkg/leakybucket/tests/leaky-fixedqueue/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/leaky-fixedqueue/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/leaky-fixedqueue/test.json
 create mode 100644 pkg/leakybucket/tests/leaky-scope-range-expression/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/leaky-scope-range-expression/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/leaky-scope-range-expression/test.json
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta-and-information/test.json
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/overflow-with-meta/test.json
 create mode 100644 pkg/leakybucket/tests/simple-counter-bh/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter-bh/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter-bh/test.json
 create mode 100644 pkg/leakybucket/tests/simple-counter-timeout/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter-timeout/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter-timeout/test.json
 create mode 100644 pkg/leakybucket/tests/simple-counter/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-counter/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-blackhole/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-cancel_on/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-cancel_on/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-cancel_on/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-overflow/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-underflow/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-cachesize/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-cachesize/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-cachesize/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-leaky-uniq/test.json
 create mode 100644 pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger-external-data/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger-external-data/simple_patterns.txt
 create mode 100644 pkg/leakybucket/tests/simple-trigger-external-data/test.json
 create mode 100644 pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger-reprocess/test.json
 create mode 100644 pkg/leakybucket/tests/simple-trigger/bucket.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger/scenarios.yaml
 create mode 100644 pkg/leakybucket/tests/simple-trigger/test.json
 create mode 100644 pkg/leakybucket/timemachine.go
 create mode 100644 pkg/leakybucket/trigger.go
 create mode 100644 pkg/leakybucket/uniq.go
 create mode 100644 pkg/metabase/api.go
 create mode 100644 pkg/metabase/container.go
 create mode 100644 pkg/metabase/database.go
 create mode 100644 pkg/metabase/metabase.go
 create mode 100644 pkg/models/add_alerts_request.go
 create mode 100644 pkg/models/add_alerts_response.go
 create mode 100644 pkg/models/add_signals_request.go
 create mode 100644 pkg/models/add_signals_request_item.go
 create mode 100644 pkg/models/alert.go
 create mode 100644 pkg/models/decision.go
 create mode 100644 pkg/models/decisions_stream_response.go
 create mode 100644 pkg/models/delete_alerts_response.go
 create mode 100644 pkg/models/delete_decision_response.go
 create mode 100644 pkg/models/error_response.go
 create mode 100644 pkg/models/event.go
 create mode 100644 pkg/models/flush_decision_response.go
 create mode 100644 pkg/models/get_alerts_response.go
 create mode 100644 pkg/models/get_decisions_response.go
 create mode 100644 pkg/models/helpers.go
 create mode 100644 pkg/models/localapi_swagger.yaml
 create mode 100644 pkg/models/meta.go
 create mode 100644 pkg/models/metrics.go
 create mode 100644 pkg/models/metrics_agent_info.go
 create mode 100644 pkg/models/metrics_bouncer_info.go
 create mode 100644 pkg/models/source.go
 create mode 100644 pkg/models/topx_response.go
 create mode 100644 pkg/models/watcher_auth_request.go
 create mode 100644 pkg/models/watcher_auth_response.go
 create mode 100644 pkg/models/watcher_registration_request.go
 create mode 100644 pkg/parser/README.md
 create mode 100644 pkg/parser/enrich.go
 create mode 100644 pkg/parser/enrich_date.go
 create mode 100644 pkg/parser/enrich_date_test.go
 create mode 100644 pkg/parser/enrich_dns.go
 create mode 100644 pkg/parser/enrich_geoip.go
 create mode 100644 pkg/parser/node.go
 create mode 100644 pkg/parser/node_test.go
 create mode 100644 pkg/parser/parsing_test.go
 create mode 100644 pkg/parser/runtime.go
 create mode 100644 pkg/parser/stage.go
 create mode 100644 pkg/parser/test_data/GeoLite2-ASN.mmdb
 create mode 100644 pkg/parser/test_data/GeoLite2-City.mmdb
 create mode 100644 pkg/parser/tests/base-grok-expression/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-grok-expression/parsers.yaml
 create mode 100644 pkg/parser/tests/base-grok-expression/test.yaml
 create mode 100644 pkg/parser/tests/base-grok-external-data/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-grok-external-data/parsers.yaml
 create mode 100644 pkg/parser/tests/base-grok-external-data/test.yaml
 create mode 100644 pkg/parser/tests/base-grok-import/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-grok-import/parsers.yaml
 create mode 100644 pkg/parser/tests/base-grok-import/test.yaml
 create mode 100644 pkg/parser/tests/base-grok-no-subnode/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-grok-no-subnode/parsers.yaml
 create mode 100644 pkg/parser/tests/base-grok-no-subnode/test.yaml
 create mode 100644 pkg/parser/tests/base-grok/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-grok/parsers.yaml
 create mode 100644 pkg/parser/tests/base-grok/test.yaml
 create mode 100644 pkg/parser/tests/base-json-extract/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-json-extract/base-grok2.yaml
 create mode 100644 pkg/parser/tests/base-json-extract/parsers.yaml
 create mode 100644 pkg/parser/tests/base-json-extract/test.yaml
 create mode 100644 pkg/parser/tests/base-tree/base-grok.yaml
 create mode 100644 pkg/parser/tests/base-tree/parsers.yaml
 create mode 100644 pkg/parser/tests/base-tree/test.yaml
 create mode 100644 pkg/parser/tests/dateparser-enrich/base-grok.yaml
 create mode 100644 pkg/parser/tests/dateparser-enrich/parsers.yaml
 create mode 100644 pkg/parser/tests/dateparser-enrich/test.yaml
 create mode 100644 pkg/parser/tests/geoip-enrich/base-grok.yaml
 create mode 100644 pkg/parser/tests/geoip-enrich/parsers.yaml
 create mode 100644 pkg/parser/tests/geoip-enrich/test.yaml
 create mode 100644 pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml
pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml create mode 100644 pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml create mode 100644 pkg/parser/tests/multi-stage-grok/parsers.yaml create mode 100644 pkg/parser/tests/multi-stage-grok/test.yaml create mode 100644 pkg/parser/tests/reverse-dns-enrich/base-grok.yaml create mode 100644 pkg/parser/tests/reverse-dns-enrich/parsers.yaml create mode 100644 pkg/parser/tests/reverse-dns-enrich/test.yaml create mode 100644 pkg/parser/tests/sample_strings.txt create mode 100644 pkg/parser/tests/whitelist-base/base-grok.yaml create mode 100644 pkg/parser/tests/whitelist-base/parsers.yaml create mode 100644 pkg/parser/tests/whitelist-base/test.yaml create mode 100644 pkg/parser/unix_parser.go create mode 100644 pkg/parser/whitelist.go create mode 100644 pkg/protobufs/README.md create mode 100644 pkg/protobufs/notifier.pb.go create mode 100644 pkg/protobufs/notifier.proto create mode 100644 pkg/protobufs/plugin_interface.go create mode 100644 pkg/time/AUTHORS create mode 100644 pkg/time/CONTRIBUTING.md create mode 100644 pkg/time/CONTRIBUTORS create mode 100644 pkg/time/LICENSE create mode 100644 pkg/time/PATENTS create mode 100644 pkg/time/README.md create mode 100644 pkg/time/rate/rate.go create mode 100644 pkg/time/rate/rate_test.go create mode 100644 pkg/types/constants.go create mode 100644 pkg/types/dataset.go create mode 100644 pkg/types/dataset_test.go create mode 100644 pkg/types/event.go create mode 100644 pkg/types/grok_pattern.go create mode 100644 pkg/types/ip.go create mode 100644 pkg/types/ip_test.go create mode 100644 pkg/types/line.go create mode 100644 pkg/types/profile.go create mode 100644 pkg/types/utils.go create mode 100644 pkg/yamlpatch/merge.go create mode 100644 pkg/yamlpatch/merge_test.go create mode 100644 pkg/yamlpatch/patcher.go create mode 100644 pkg/yamlpatch/patcher_test.go create mode 100644 pkg/yamlpatch/testdata/base.yaml create mode 100644 pkg/yamlpatch/testdata/expect.yaml create mode 100644 pkg/yamlpatch/testdata/production.yaml create mode 100644 platform/freebsd.mk create mode 100644 platform/linux.mk create mode 100644 platform/openbsd.mk create mode 100644 platform/unix_common.mk create mode 100644 platform/windows.mk create mode 100644 plugins/notifications/dummy/LICENSE create mode 100644 plugins/notifications/dummy/Makefile create mode 100644 plugins/notifications/dummy/dummy.yaml create mode 100644 plugins/notifications/dummy/main.go create mode 100644 plugins/notifications/email/LICENSE create mode 100644 plugins/notifications/email/Makefile create mode 100644 plugins/notifications/email/email.yaml create mode 100644 plugins/notifications/email/go.mod create mode 100644 plugins/notifications/email/go.sum create mode 100644 plugins/notifications/email/main.go create mode 100644 plugins/notifications/http/LICENSE create mode 100644 plugins/notifications/http/Makefile create mode 100644 plugins/notifications/http/go.mod create mode 100644 plugins/notifications/http/go.sum create mode 100644 plugins/notifications/http/http.yaml create mode 100644 plugins/notifications/http/main.go create mode 100644 plugins/notifications/slack/LICENSE create mode 100644 plugins/notifications/slack/Makefile create mode 100644 plugins/notifications/slack/go.mod create mode 100644 plugins/notifications/slack/go.sum create mode 100644 plugins/notifications/slack/main.go create mode 100644 plugins/notifications/slack/slack.yaml create mode 100644 plugins/notifications/splunk/LICENSE create mode 100644 
plugins/notifications/splunk/Makefile create mode 100644 plugins/notifications/splunk/go.mod create mode 100644 plugins/notifications/splunk/go.sum create mode 100644 plugins/notifications/splunk/main.go create mode 100644 plugins/notifications/splunk/splunk.yaml create mode 100644 rpm/SOURCES/80-crowdsec.preset create mode 100644 rpm/SOURCES/crowdsec.unit.patch create mode 100644 rpm/SOURCES/user.patch create mode 100644 rpm/SPECS/crowdsec.spec create mode 100644 scripts/check_go_version.ps1 create mode 100644 scripts/test_env.ps1 create mode 100755 scripts/test_env.sh create mode 100755 scripts/test_wizard_upgrade.sh create mode 100644 tests/.gitignore create mode 100644 tests/README.md create mode 100644 tests/ansible/.gitignore create mode 100644 tests/ansible/README.md create mode 100644 tests/ansible/ansible.cfg create mode 100755 tests/ansible/env/example.sh create mode 100755 tests/ansible/env/pkg-sqlite.sh create mode 100755 tests/ansible/env/source-mysql.sh create mode 100755 tests/ansible/env/source-pgx.sh create mode 100755 tests/ansible/env/source-postgres.sh create mode 100755 tests/ansible/env/source-sqlite.sh create mode 100644 tests/ansible/install_binary_package.yml create mode 100755 tests/ansible/prepare-run create mode 100644 tests/ansible/prepare_tests.yml create mode 100644 tests/ansible/provision_dependencies.yml create mode 100644 tests/ansible/provision_test_suite.yml create mode 100644 tests/ansible/requirements.yml create mode 100644 tests/ansible/roles/make_fixture/tasks/main.yml create mode 100644 tests/ansible/roles/make_fixture/vars/main.yml create mode 100644 tests/ansible/roles/run_func_tests/tasks/main.yml create mode 100644 tests/ansible/roles/run_func_tests/vars/main.yml create mode 100644 tests/ansible/run_all.yml create mode 100644 tests/ansible/run_tests.yml create mode 100644 tests/ansible/vagrant/alma-8/Vagrantfile create mode 100644 tests/ansible/vagrant/alma-9/Vagrantfile create mode 100644 tests/ansible/vagrant/centos-7/Vagrantfile create mode 100755 tests/ansible/vagrant/centos-7/skip create mode 100644 tests/ansible/vagrant/centos-8/Vagrantfile create mode 100644 tests/ansible/vagrant/centos-9/Vagrantfile create mode 100644 tests/ansible/vagrant/common create mode 100644 tests/ansible/vagrant/debian-10-buster/Vagrantfile create mode 100644 tests/ansible/vagrant/debian-11-bullseye/Vagrantfile create mode 100644 tests/ansible/vagrant/debian-9-stretch/Vagrantfile create mode 100755 tests/ansible/vagrant/debian-9-stretch/skip create mode 100644 tests/ansible/vagrant/debian-testing/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/alpine-3.16/Vagrantfile create mode 100755 tests/ansible/vagrant/experimental/alpine-3.16/bootstrap create mode 100755 tests/ansible/vagrant/experimental/alpine-3.16/skip create mode 100644 tests/ansible/vagrant/experimental/amazon-linux-2/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/amazon-linux-2/issues.txt create mode 100644 tests/ansible/vagrant/experimental/arch/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/devuan-3/Vagrantfile create mode 100755 tests/ansible/vagrant/experimental/devuan-3/skip create mode 100644 tests/ansible/vagrant/experimental/dragonflybsd-6/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/gentoo/Vagrantfile create mode 100755 tests/ansible/vagrant/experimental/gentoo/bootstrap create mode 100644 tests/ansible/vagrant/experimental/hardenedbsd-13/Vagrantfile create mode 100755 
tests/ansible/vagrant/experimental/hardenedbsd-13/bootstrap create mode 100755 tests/ansible/vagrant/experimental/hardenedbsd-13/skip create mode 100644 tests/ansible/vagrant/experimental/netbsd-9/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/openbsd-7/Vagrantfile create mode 100755 tests/ansible/vagrant/experimental/openbsd-7/bootstrap create mode 100755 tests/ansible/vagrant/experimental/openbsd-7/skip create mode 100644 tests/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile create mode 100644 tests/ansible/vagrant/experimental/ubuntu-14.04-trusty/Vagrantfile create mode 100644 tests/ansible/vagrant/fedora-33/Vagrantfile create mode 100755 tests/ansible/vagrant/fedora-33/skip create mode 100644 tests/ansible/vagrant/fedora-34/Vagrantfile create mode 100755 tests/ansible/vagrant/fedora-34/skip create mode 100644 tests/ansible/vagrant/fedora-35/Vagrantfile create mode 100755 tests/ansible/vagrant/fedora-35/skip create mode 100644 tests/ansible/vagrant/fedora-36/Vagrantfile create mode 100755 tests/ansible/vagrant/fedora-36/skip create mode 100644 tests/ansible/vagrant/freebsd-12/Vagrantfile create mode 100755 tests/ansible/vagrant/freebsd-12/skip create mode 100644 tests/ansible/vagrant/freebsd-13/Vagrantfile create mode 100755 tests/ansible/vagrant/freebsd-13/skip create mode 100644 tests/ansible/vagrant/oracle-7/Vagrantfile create mode 100755 tests/ansible/vagrant/oracle-7/skip create mode 100644 tests/ansible/vagrant/oracle-8/Vagrantfile create mode 100644 tests/ansible/vagrant/oracle-9/Vagrantfile create mode 100644 tests/ansible/vagrant/rocky-8/Vagrantfile create mode 100644 tests/ansible/vagrant/rocky-9/Vagrantfile create mode 100644 tests/ansible/vagrant/ubuntu-16.04-xenial/Vagrantfile create mode 100755 tests/ansible/vagrant/ubuntu-16.04-xenial/skip create mode 100644 tests/ansible/vagrant/ubuntu-18.04-bionic/Vagrantfile create mode 100644 tests/ansible/vagrant/ubuntu-20.04-focal/Vagrantfile create mode 100644 tests/ansible/vagrant/ubuntu-22.04-jammy/Vagrantfile create mode 100644 tests/ansible/vars/go.yml create mode 100644 tests/ansible/vars/mysql.yml create mode 100644 tests/ansible/vars/postgres.yml create mode 100644 tests/bats.mk create mode 100644 tests/bats/01_base.bats create mode 100644 tests/bats/01_crowdsec.bats create mode 100644 tests/bats/02_nolapi.bats create mode 100644 tests/bats/03_noagent.bats create mode 100644 tests/bats/04_capi.bats create mode 100644 tests/bats/04_nocapi.bats create mode 100644 tests/bats/05_config_yaml_local.bats create mode 100644 tests/bats/10_bouncers.bats create mode 100644 tests/bats/11_bouncers_tls.bats create mode 100644 tests/bats/20_collections.bats create mode 100644 tests/bats/30_machines.bats create mode 100644 tests/bats/30_machines_tls.bats create mode 100644 tests/bats/40_cold-logs.bats create mode 100644 tests/bats/40_live-ban.bats create mode 100644 tests/bats/50_simulation.bats create mode 100644 tests/bats/70_http_plugin.bats create mode 100644 tests/bats/71_dummy_plugin.bats create mode 100644 tests/bats/72_plugin_badconfig.bats create mode 100644 tests/bats/80_alerts.bats create mode 100644 tests/bats/90_decisions.bats create mode 100644 tests/bats/97_ipv4_single.bats create mode 100644 tests/bats/97_ipv6_single.bats create mode 100644 tests/bats/98_ipv4_range.bats create mode 100644 tests/bats/98_ipv6_range.bats create mode 100644 tests/bats/99_lapi-stream-mode-scenario.bats create mode 100644 tests/bats/99_lapi-stream-mode-scopes.bats create mode 100644 tests/bats/99_lapi-stream-mode.bats 
create mode 100755 tests/bats/reformat create mode 100644 tests/bats/testdata/cfssl/agent.json create mode 100644 tests/bats/testdata/cfssl/agent_invalid.json create mode 100644 tests/bats/testdata/cfssl/bouncer.json create mode 100644 tests/bats/testdata/cfssl/bouncer_invalid.json create mode 100644 tests/bats/testdata/cfssl/ca.json create mode 100644 tests/bats/testdata/cfssl/intermediate.json create mode 100644 tests/bats/testdata/cfssl/profiles.json create mode 100644 tests/bats/testdata/cfssl/server.json create mode 100755 tests/bin/assert-crowdsec-not-running create mode 100755 tests/bin/check-requirements create mode 100755 tests/bin/collect-hub-coverage create mode 100755 tests/bin/crowdsec-wrapper create mode 100755 tests/bin/cscli-wrapper create mode 100755 tests/bin/generate-hub-tests create mode 100644 tests/bin/mock-http.py create mode 100755 tests/bin/wait-for-port create mode 100755 tests/disable-capi create mode 100644 tests/dyn-bats/README.md create mode 100755 tests/enable-capi create mode 100755 tests/instance-crowdsec create mode 100755 tests/instance-data create mode 100755 tests/instance-db create mode 100755 tests/instance-mock-http create mode 100755 tests/lib/config/config-global create mode 100755 tests/lib/config/config-local create mode 100755 tests/lib/db/instance-mysql create mode 120000 tests/lib/db/instance-pgx create mode 100755 tests/lib/db/instance-postgres create mode 100755 tests/lib/db/instance-sqlite create mode 100755 tests/lib/init/crowdsec-daemon create mode 100755 tests/lib/init/crowdsec-systemd create mode 100755 tests/lib/setup.sh create mode 100755 tests/lib/setup_file.sh create mode 100755 tests/lib/teardown_file.sh create mode 100644 tests/localstack/docker-compose.yml create mode 100755 tests/run-tests create mode 100644 windows/Chocolatey/crowdsec/ReadMe.md create mode 100644 windows/Chocolatey/crowdsec/crowdsec.nuspec create mode 100644 windows/Chocolatey/crowdsec/tools/LICENSE.txt create mode 100644 windows/Chocolatey/crowdsec/tools/VERIFICATION.txt create mode 100644 windows/Chocolatey/crowdsec/tools/chocolateybeforemodify.ps1 create mode 100644 windows/Chocolatey/crowdsec/tools/chocolateyinstall.ps1 create mode 100644 windows/Chocolatey/crowdsec/tools/chocolateyuninstall.ps1 create mode 100644 windows/README.md create mode 100644 windows/install_dev_windows.ps1 create mode 100644 windows/install_installer_windows.ps1 create mode 100644 windows/installer/WixUI_HK.wxs create mode 100644 windows/installer/crowdsec_icon.ico create mode 100644 windows/installer/crowdsec_msi_top_banner.bmp create mode 100644 windows/installer/installer_dialog.bmp create mode 100644 windows/installer/product.wxs create mode 100755 wizard.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..1deda36 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +# We include .git in the build context because excluding it would break the +# "make release" target, which uses git to retrieve the build version and tag. +#.git diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 0000000..cacb5fa --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,136 @@ +name: Bug report +description: Report a bug encountered while operating crowdsec +labels: bug +body: + - type: textarea + id: problem + attributes: + label: What happened? + description: | + Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. 
+ If this matter is security related, please disclose it privately to security@crowdsec.net. + validations: + required: true + + - type: textarea + id: expected + attributes: + label: What did you expect to happen? + validations: + required: true + + - type: textarea + id: repro + attributes: + label: How can we reproduce it (as minimally and precisely as possible)? + validations: + required: true + + - type: textarea + id: additional + attributes: + label: Anything else we need to know? + + - type: textarea + id: Version + attributes: + label: Crowdsec version + value: |
+ + ```console + $ cscli version + # paste output here + ``` + +
+ validations: + required: true + + - type: textarea + id: osVersion + attributes: + label: OS version + value: | +
+ + ```console + # On Linux: + $ cat /etc/os-release + # paste output here + $ uname -a + # paste output here + + # On Windows: + C:\> wmic os get Caption, Version, BuildNumber, OSArchitecture + # paste output here + ``` + +
+ + - type: textarea + id: collections + attributes: + label: Enabled collections and parsers + value: | +
+ + ```console + $ cscli hub list -o raw + # paste output here + ``` + +
+ + - type: textarea + id: acquis + attributes: + label: Acquisition config + value: | +
+ + ```console + # On Linux: + $ cat /etc/crowdsec/acquis.yaml /etc/crowdsec/acquis.d/* + # paste output here + + # On Windows: + C:\> Get-Content C:\ProgramData\CrowdSec\config\acquis.yaml + # paste output here + ``` +
+ + - type: textarea + id: config + attributes: + label: Config show + value: | +
+ + ```console + $ cscli config show + # paste output here + ``` + +
+ + - type: textarea + id: metrics + attributes: + label: Prometheus metrics + value: | +
+ + ```console + $ cscli metrics + # paste output here + ``` + +
+ + - type: textarea + id: customizations + attributes: + label: "Related custom config versions (if applicable): notification plugins, custom scenarios, parsers, etc." + value: |
+ +
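Taken together, the form above asks reporters for a standard diagnostic bundle. As a convenience, a minimal sketch of collecting everything it requests in one pass on Linux (an illustration only, assuming `cscli` is on the PATH and the default configuration locations are used):

```console
$ cscli version
$ cscli hub list -o raw
$ cscli config show
$ cscli metrics
$ cat /etc/crowdsec/acquis.yaml /etc/crowdsec/acquis.d/* 2>/dev/null
$ cat /etc/os-release && uname -a
```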
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..61de159 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,4 @@ +contact_links: + - name: Support Request + url: https://discourse.crowdsec.net + about: Support request or question relating to Crowdsec diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 0000000..6449cee --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,19 @@ +name: Feature request +description: Suggest an improvement or a new feature +labels: enhancement +body: + - type: textarea + id: feature + attributes: + label: What would you like to be added? + description: | + Significant feature requests are unlikely to make progress as issues. Please consider engaging on Discord (discord.gg/crowdsec) or the forums (https://discourse.crowdsec.net) instead. + validations: + required: true + + - type: textarea + id: rationale + attributes: + label: Why is this needed? + validations: + required: true diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 0000000..8ed6bcf --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,31 @@ +categories: + - title: 'New Features' + labels: + - 'new feature' + - title: 'Improvements' + labels: + - 'enhancement' + - 'improvement' + - title: 'Bug Fixes' + labels: + - 'fix' + - 'bugfix' + - 'bug' + - title: 'Documentation' + labels: + - 'documentation' + - 'doc' +tag-template: "- $TITLE @$AUTHOR (#$NUMBER)" +template: | + ## Changes + + $CHANGES + + ## GeoLite2 notice + + This product includes GeoLite2 data created by MaxMind, available from https://www.maxmind.com. + + ## Installation + + Take a look at the [installation instructions](https://doc.crowdsec.net/docs/getting_started/install_crowdsec).
+ diff --git a/.github/workflows/.yamllint b/.github/workflows/.yamllint new file mode 120000 index 0000000..4a4652c --- /dev/null +++ b/.github/workflows/.yamllint @@ -0,0 +1 @@ +../../.yamllint \ No newline at end of file diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml new file mode 100644 index 0000000..b24f2db --- /dev/null +++ b/.github/workflows/bats-hub.yml @@ -0,0 +1,77 @@ +name: Hub tests + +on: + workflow_call: + secrets: + GIST_BADGES_SECRET: + required: true + GIST_BADGES_ID: + required: true + +env: + PREFIX_TEST_NAMES_WITH_FILE: true + +jobs: + build: + name: "Build + tests" + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + + - name: "Force machineid" + run: | + sudo chmod +w /etc/machine-id + echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: "Clone CrowdSec" + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: true + + - name: "Install bats dependencies" + run: | + sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential daemonize jq netcat-openbsd + go install github.com/mikefarah/yq/v4@latest + go install github.com/cloudflare/cfssl/cmd/cfssl@master + go install github.com/cloudflare/cfssl/cmd/cfssljson@master + sudo cp -u ~/go/bin/yq /usr/local/bin/ + sudo cp -u ~/go/bin/cfssl /usr/local/bin + sudo cp -u ~/go/bin/cfssljson /usr/local/bin + + - name: "Build crowdsec and fixture" + run: make bats-clean bats-build bats-fixture + + - name: "Run hub tests" + run: make bats-test-hub + + - name: "Collect hub coverage" + run: ./tests/bin/collect-hub-coverage >> $GITHUB_ENV + + - name: "Create Parsers badge" + uses: schneegans/dynamic-badges-action@v1.1.0 + if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} + with: + auth: ${{ secrets.GIST_BADGES_SECRET }} + gistID: ${{ secrets.GIST_BADGES_ID }} + filename: crowdsec_parsers_badge.json + label: Hub Parsers + message: ${{ env.PARSERS_COV }} + color: ${{ env.SCENARIO_BADGE_COLOR }} + + - name: "Create Scenarios badge" + uses: schneegans/dynamic-badges-action@v1.1.0 + if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} + with: + auth: ${{ secrets.GIST_BADGES_SECRET }} + gistID: ${{ secrets.GIST_BADGES_ID }} + filename: crowdsec_scenarios_badge.json + label: Hub Scenarios + message: ${{ env.SCENARIOS_COV }} + color: ${{ env.SCENARIO_BADGE_COLOR }} diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml new file mode 100644 index 0000000..4987cc2 --- /dev/null +++ b/.github/workflows/bats-mysql.yml @@ -0,0 +1,96 @@ +name: Functional tests (MySQL) + +on: + workflow_call: + inputs: + database_image: + required: true + type: string + +env: + PREFIX_TEST_NAMES_WITH_FILE: true + +jobs: + + build: + name: "Build + tests" + runs-on: ubuntu-latest + timeout-minutes: 20 + services: + database: + image: ${{ inputs.database_image }} + env: + MYSQL_ROOT_PASSWORD: "secret" + ports: + - 3306:3306 + + steps: + + - name: "Force machineid" + run: | + sudo chmod +w /etc/machine-id + echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: "Check out CrowdSec repository" + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: true + + - name: "Install bats dependencies" + run: | + sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential daemonize jq 
netcat-openbsd + go install github.com/mikefarah/yq/v4@latest + go install github.com/cloudflare/cfssl/cmd/cfssl@master + go install github.com/cloudflare/cfssl/cmd/cfssljson@master + sudo cp -u ~/go/bin/yq ~/go/bin/cfssl ~/go/bin/cfssljson /usr/local/bin/ + + - name: "Build crowdsec and fixture" + run: | + make clean bats-build bats-fixture + env: + DB_BACKEND: mysql + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + MYSQL_PASSWORD: "secret" + MYSQL_USER: root + + - name: "Run tests" + run: make bats-test + env: + DB_BACKEND: mysql + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + MYSQL_PASSWORD: "secret" + MYSQL_USER: root + + # + # In case you need to inspect the database status after the failure of a given test + # + # - name: "Run specified tests" + # run: ./tests/run-tests tests/bats/.bats -f "" + + - name: Show database dump + run: ./tests/instance-db dump /dev/fd/1 + env: + DB_BACKEND: mysql + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + MYSQL_PASSWORD: "secret" + MYSQL_USER: root + if: ${{ always() }} + + - name: "Show crowdsec logs" + run: + for file in $(find ./tests/local/var/log -type f); do echo ">>>>> $file"; cat $file; echo; done + if: ${{ always() }} + + - name: "Show database logs" + run: docker logs "${{ job.services.database.id }}" + if: ${{ always() }} diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml new file mode 100644 index 0000000..80be5bf --- /dev/null +++ b/.github/workflows/bats-postgres.yml @@ -0,0 +1,99 @@ +name: Functional tests (Postgres) + +on: + workflow_call: + +env: + PREFIX_TEST_NAMES_WITH_FILE: true + +jobs: + + build: + name: "Build + tests" + runs-on: ubuntu-latest + timeout-minutes: 20 + services: + database: + image: postgres:14 + env: + POSTGRES_PASSWORD: "secret" + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready -u postgres + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + + - name: "Force machineid" + run: | + sudo chmod +w /etc/machine-id + echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: "Check out CrowdSec repository" + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: true + + - name: "Install bats dependencies" + run: | + sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential daemonize jq netcat-openbsd + go install github.com/mikefarah/yq/v4@latest + go install github.com/cloudflare/cfssl/cmd/cfssl@master + go install github.com/cloudflare/cfssl/cmd/cfssljson@master + sudo cp -u ~/go/bin/yq ~/go/bin/cfssl ~/go/bin/cfssljson /usr/local/bin/ + + - name: "Build crowdsec and fixture (DB_BACKEND: pgx)" + run: | + make clean bats-build bats-fixture + env: + DB_BACKEND: pgx + PGHOST: 127.0.0.1 + PGPORT: 5432 + PGPASSWORD: "secret" + PGUSER: postgres + + - name: "Run tests (DB_BACKEND: pgx)" + run: make bats-test + env: + DB_BACKEND: pgx + PGHOST: 127.0.0.1 + PGPORT: 5432 + PGPASSWORD: "secret" + PGUSER: postgres + +# - name: "Build crowdsec and fixture (DB_BACKEND: postgres)" +# run: make clean bats-build bats-fixture +# env: +# DB_BACKEND: postgres +# PGHOST: 127.0.0.1 +# PGPORT: 5432 +# PGPASSWORD: "secret" +# PGUSER: postgres +# +# - name: "Run tests (DB_BACKEND: postgres)" +# run: make bats-test +# env: +# DB_BACKEND: postgres +# PGHOST: 127.0.0.1 +# PGPORT: 5432 +# PGPASSWORD: "secret" +# PGUSER: postgres + + - name: "Show crowdsec logs" + run: + for file in $(find ./tests/local/var/log -type f); do echo ">>>>> $file"; cat $file; echo; 
done + if: ${{ always() }} + + - name: "Show database logs" + run: docker logs "${{ job.services.database.id }}" + if: ${{ always() }} diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml new file mode 100644 index 0000000..36d2a11 --- /dev/null +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -0,0 +1,80 @@ +name: Functional tests (sqlite) + +on: + workflow_call: + +env: + PREFIX_TEST_NAMES_WITH_FILE: true + TEST_COVERAGE: true + +jobs: + + build: + name: "Build + tests" + runs-on: ubuntu-latest + timeout-minutes: 20 + + steps: + + - name: "Force machineid" + run: | + sudo chmod +w /etc/machine-id + echo githubciXXXXXXXXXXXXXXXXXXXXXXXX | sudo tee /etc/machine-id + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: "Check out CrowdSec repository" + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: true + + - name: "Install bats dependencies" + run: | + sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential daemonize jq netcat-openbsd + go install github.com/mikefarah/yq/v4@latest + go install github.com/cloudflare/cfssl/cmd/cfssl@master + go install github.com/cloudflare/cfssl/cmd/cfssljson@master + sudo cp -u ~/go/bin/yq ~/go/bin/cfssl ~/go/bin/cfssljson /usr/local/bin/ + go install github.com/wadey/gocovmerge@latest + sudo cp -u ~/go/bin/gocovmerge /usr/local/bin/ + + - name: "Build crowdsec and fixture" + run: | + make clean bats-build bats-fixture + + - name: "Run tests" + run: make bats-test + + # + # In case you need to inspect the database status after the failure of a given test + # + # - name: "Run specified tests" + # run: ./tests/run-tests tests/bats/.bats -f "" + + - name: "Show database dump" + run: | + ./tests/instance-crowdsec stop + sqlite3 ./tests/local/var/lib/crowdsec/data/crowdsec.db '.dump' + if: ${{ always() }} + + - name: "Show crowdsec logs" + run: + for file in $(find ./tests/local/var/log -type f); do echo ">>>>> $file"; cat $file; echo; done + if: ${{ always() }} + + - name: Upload crowdsec coverage to codecov + uses: codecov/codecov-action@v3 + with: + files: ./tests/local/var/lib/coverage/coverage-crowdsec.out + flags: func-crowdsec + + - name: Upload cscli coverage to codecov + uses: codecov/codecov-action@v3 + with: + files: ./tests/local/var/lib/coverage/coverage-cscli.out + flags: func-cscli diff --git a/.github/workflows/bats.yml b/.github/workflows/bats.yml new file mode 100644 index 0000000..f8ba05e --- /dev/null +++ b/.github/workflows/bats.yml @@ -0,0 +1,58 @@ +--- +# This workflow is actually running +# only functional tests, but the +# name is used for the badge in README.md + +name: Tests + +# Main workflow for functional tests; it calls all the others through parallel jobs. +# +# https://docs.github.com/en/actions/using-workflows/reusing-workflows +# +# There is no need to merge coverage output because codecov.io should take care of that. + +on: + push: + branches: [master] + paths-ignore: + - 'README.md' + pull_request: + branches: [master] + paths-ignore: + - 'README.md' + +jobs: + + sqlite: + uses: ./.github/workflows/bats-sqlite-coverage.yml + + # Jobs for Postgres (and sometimes MySQL) can have failing tests on GitHub + # CI, but they pass when run on devs' machines or in the release checks. They + # are currently enabled; add an `if: false` condition to a job to disable it.
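+ # The database-specific jobs below differ only in the backend they provision. As a + # rough sketch (an illustration, not part of CI), the suites can be run locally with + # the same Makefile targets the reusable workflows invoke, assuming a checkout with + # submodules, the bats dependencies installed, and for MySQL a local server: + # + # make clean bats-build bats-fixture bats-test + # + # DB_BACKEND=mysql MYSQL_HOST=127.0.0.1 MYSQL_PORT=3306 MYSQL_USER=root MYSQL_PASSWORD=secret make clean bats-build bats-fixture bats-test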
+ + mariadb: + uses: ./.github/workflows/bats-mysql.yml + with: + database_image: mariadb:latest + + mysql: + uses: ./.github/workflows/bats-mysql.yml + with: + database_image: mysql:latest + + postgres: + uses: ./.github/workflows/bats-postgres.yml + + hub: + uses: ./.github/workflows/bats-hub.yml + secrets: + GIST_BADGES_ID: ${{ secrets.GIST_BADGES_ID }} + GIST_BADGES_SECRET: ${{ secrets.GIST_BADGES_SECRET }} diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml new file mode 100644 index 0000000..d65a5cb --- /dev/null +++ b/.github/workflows/ci-windows-build-msi.yml @@ -0,0 +1,37 @@ +name: build-msi (windows) + +on: + pull_request: + branches: [ master ] + paths-ignore: + - 'docs/**' + - 'mkdocs.yml' + - 'README.md' + +jobs: + + build: + name: Build + runs-on: windows-2019 + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + - id: get_latest_release + uses: pozetroninc/github-action-get-latest-release@master + with: + repository: crowdsecurity/crowdsec + excludes: draft + - id: set_release_in_env + run: echo "BUILD_VERSION=${{ steps.get_latest_release.outputs.release }}" >> $env:GITHUB_ENV + - name: Build + run: make windows_installer + - name: Upload MSI + uses: actions/upload-artifact@v2 + with: + path: crowdsec*msi + name: crowdsec.msi diff --git a/.github/workflows/ci_golangci-lint.yml b/.github/workflows/ci_golangci-lint.yml new file mode 100644 index 0000000..d45b42e --- /dev/null +++ b/.github/workflows/ci_golangci-lint.yml @@ -0,0 +1,47 @@ +name: golangci-lint + +on: + push: + tags: + - v* + branches: + - master + paths-ignore: + - 'docs/**' + - 'mkdocs.yml' + - 'README.md' + pull_request: + paths-ignore: + - 'docs/**' + - 'mkdocs.yml' + - 'README.md' +jobs: + golangci: + strategy: + matrix: + os: [ubuntu-latest, windows-2022] + name: lint + runs-on: ${{ matrix.os }} + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version + version: v1.50 + # Optional: golangci-lint command line arguments. + args: --issues-exit-code=1 --timeout 10m + # Optional: show only new issues if it's a pull request. The default value is `false`. + only-new-issues: false + # Optional: if set to true then all caching functionality will be completely disabled, + # takes precedence over all other caching options. + skip-cache: false + # Optional: if set to true then the action doesn't cache or restore ~/go/pkg. + skip-pkg-cache: false + # Optional: if set to true then the action doesn't cache or restore ~/.cache/go-build. + skip-build-cache: false diff --git a/.github/workflows/ci_release-drafter.yml b/.github/workflows/ci_release-drafter.yml new file mode 100644 index 0000000..d4aea0e --- /dev/null +++ b/.github/workflows/ci_release-drafter.yml @@ -0,0 +1,20 @@ +name: Release Drafter + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - master + +jobs: + update_release_draft: + runs-on: ubuntu-latest + steps: + # Drafts your next Release notes as Pull Requests are merged into "master" + - uses: release-drafter/release-drafter@v5 + with: + config-name: release-drafter.yml + # (Optional) specify config name to use, relative to .github/.
Default: release-drafter.yml + # config-name: my-config.yml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..3cb8d16 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,72 @@ +# yamllint disable rule:comments +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ master ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ master ] + schedule: + - cron: '15 16 * * 2' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/dispatch_ci_hub.yaml b/.github/workflows/dispatch_ci_hub.yaml new file mode 100644 index 0000000..d6ac466 --- /dev/null +++ b/.github/workflows/dispatch_ci_hub.yaml @@ -0,0 +1,27 @@ +name: Dispatch to hub on push to master + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - master + +jobs: + dispatch: + name: dispatch to hub-tests + runs-on: ubuntu-latest + steps: + - id: keydb + uses: pozetroninc/github-action-get-latest-release@master + with: + owner: crowdsecurity + repo: crowdsec + excludes: prerelease, draft + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v1 + if: ${{ github.repository_owner == 'crowdsecurity' }} + with: + token: ${{ secrets.DISPATCH_TOKEN }} + event-type: trigger_ci_hub + repository: crowdsecurity/hub + client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}' diff --git a/.github/workflows/dispatch_create_branch_hub.yaml b/.github/workflows/dispatch_create_branch_hub.yaml new file mode 100644 index 0000000..38d6f11 --- /dev/null +++ b/.github/workflows/dispatch_create_branch_hub.yaml @@ -0,0 +1,24 @@ +name: Dispatch to hub when creating pre-release + +on: + release: + types: prereleased + +jobs: + dispatch: + name: dispatch to hub-tests + runs-on: ubuntu-latest + steps: + - id: keydb + uses: pozetroninc/github-action-get-latest-release@master + with: + owner: crowdsecurity + repo: crowdsec + excludes: prerelease, draft + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v1 + with: + token: ${{ secrets.DISPATCH_TOKEN }} + event-type: create_branch + repository: crowdsecurity/hub + client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}' diff --git a/.github/workflows/dispatch_delete_branch_hub.yaml b/.github/workflows/dispatch_delete_branch_hub.yaml new file mode 100644 index 0000000..6a29869 --- /dev/null +++ b/.github/workflows/dispatch_delete_branch_hub.yaml @@ -0,0 +1,24 @@ +name: Dispatch to hub when deleting pre-release + +on: + release: + types: deleted + +jobs: + dispatch: + name: dispatch to hub-tests + runs-on: ubuntu-latest + steps: + - id: keydb + uses: pozetroninc/github-action-get-latest-release@master + with: + owner: crowdsecurity + repo: crowdsec + excludes: prerelease, draft + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v1 + with: + token: ${{ secrets.DISPATCH_TOKEN }} + event-type: delete_branch + repository: crowdsecurity/hub + client-payload: '{"version": "${{ steps.keydb.outputs.release }}"}' diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml new file mode 100644 index 0000000..b2dc48c --- /dev/null +++ b/.github/workflows/go-tests-windows.yml @@ -0,0 +1,51 @@ +name: Go tests (windows) + +on: + push: + branches: [master] + paths-ignore: + - 'README.md' + pull_request: + branches: [master] + paths-ignore: + - 'README.md' + +env: + RICHGO_FORCE_COLOR: 1 + +jobs: + + build: + name: "Build + tests" + runs-on: windows-2022 + + steps: + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: Check out CrowdSec repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: false + + - name: Build + run: | + make build + + - name: Run tests + run: | + go install github.com/kyoh86/richgo@v0.3.10 + go
test -coverprofile coverage.out -covermode=atomic ./... > out.txt + if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } + cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + + - name: Upload unit coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.out + flags: unit-windows diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml new file mode 100644 index 0000000..982880f --- /dev/null +++ b/.github/workflows/go-tests.yml @@ -0,0 +1,143 @@ +--- +# This workflow is actually running +# tests (with localstack) but the +# name is used for the badge in README.md + +name: Build + +on: + push: + branches: [master] + paths-ignore: + - 'README.md' + pull_request: + branches: [master] + paths-ignore: + - 'README.md' + +# these env variables are for localstack, so we can emulate aws services +env: + RICHGO_FORCE_COLOR: 1 + AWS_HOST: localstack + SERVICES: cloudwatch,logs,kinesis + # these are to mimic aws config + AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE + AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + AWS_REGION: us-east-1 + # and to override our endpoint in aws sdk + AWS_ENDPOINT_FORCE: http://localhost:4566 + KINESIS_INITIALIZE_STREAMS: "stream-1-shard:1,stream-2-shards:2" + +jobs: + + build: + name: "Build + tests" + runs-on: ubuntu-latest + services: + localstack: + image: localstack/localstack:0.13.3 + ports: + - 4566:4566 # Localstack exposes all services on the same port + env: + SERVICES: ${{ env.SERVICES }} + DEBUG: "" + DATA_DIR: "" + LAMBDA_EXECUTOR: "" + KINESIS_ERROR_PROBABILITY: "" + DOCKER_HOST: unix:///var/run/docker.sock + HOST_TMP_FOLDER: "/tmp" + KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }} + HOSTNAME_EXTERNAL: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly + # e.g sqs url will get localhost if we don't set this env to map our service + options: >- + --name=localstack + --health-cmd="curl -sS 127.0.0.1:4566 || exit 1" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + zoo1: + image: confluentinc/cp-zookeeper:7.1.1 + ports: + - "2181:2181" + env: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_SERVER_ID: 1 + ZOOKEEPER_SERVERS: zoo1:2888:3888 + options: >- + --name=zoo1 + --health-cmd "jps -l | grep zookeeper" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + kafka1: + image: crowdsecurity/kafka-ssl + ports: + - "9093:9093" + - "9092:9092" + - "9999:9999" + env: + KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://127.0.0.1:19092,LISTENER_DOCKER_EXTERNAL://127.0.0.1:9092,LISTENER_DOCKER_EXTERNAL_SSL://127.0.0.1:9093 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL_SSL:SSL + KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL + KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181" + KAFKA_BROKER_ID: 1 + KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_JMX_PORT: 9999 + KAFKA_JMX_HOSTNAME: "127.0.0.1" + KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" + KAFKA_SSL_KEYSTORE_FILENAME: kafka.kafka1.keystore.jks + KAFKA_SSL_KEYSTORE_CREDENTIALS: kafka1_keystore_creds + KAFKA_SSL_KEY_CREDENTIALS: kafka1_sslkey_creds + 
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.kafka1.truststore.jks + KAFKA_SSL_TRUSTSTORE_CREDENTIALS: kafka1_truststore_creds + KAFKA_SSL_ENABLED_PROTOCOLS: TLSv1.2 + KAFKA_SSL_PROTOCOL: TLSv1.2 + KAFKA_SSL_CLIENT_AUTH: none + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" + options: >- + --name=kafka1 + --health-cmd "kafka-broker-api-versions --version" + --health-interval 10s + --health-timeout 10s + --health-retries 5 + + steps: + + - name: "Set up Go 1.19" + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + + - name: Check out CrowdSec repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + submodules: false + + - name: Build and run tests + run: | + go install github.com/ory/go-acc@v0.2.8 + go install github.com/kyoh86/richgo@v0.3.10 + set -o pipefail + make build + go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models \ + | sed 's/ *coverage:.*of statements in.*//' \ + | richgo testfilter + + - name: Build and run tests (static) + run: | + make clean build BUILD_STATIC=yes + make test \ + | richgo testfilter + + - name: Upload unit coverage to Codecov + uses: codecov/codecov-action@v3 + with: + files: coverage.out + flags: unit-linux diff --git a/.github/workflows/release_publish-package.yml b/.github/workflows/release_publish-package.yml new file mode 100644 index 0000000..0e3c837 --- /dev/null +++ b/.github/workflows/release_publish-package.yml @@ -0,0 +1,48 @@ +# .github/workflows/release_publish-package.yml +name: build + +on: + release: + types: prereleased + +jobs: + build: + name: Build and upload binary package + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build the binaries + run: make release + - name: Upload to release + uses: JasonEtco/upload-to-release@master + with: + args: crowdsec-release.tgz application/x-gzip + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + build_static: + name: Build and upload static binary package + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v3 + with: + go-version: 1.19 + id: go + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build the binaries + run: | + make release BUILD_STATIC=yes + mv crowdsec-release.tgz crowdsec-release.static.tgz + - name: Upload to release + uses: JasonEtco/upload-to-release@master + with: + args: crowdsec-release.static.tgz application/x-gzip + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release_publish_docker-image-debian.yml b/.github/workflows/release_publish_docker-image-debian.yml new file mode 100644 index 0000000..620623e --- /dev/null +++ b/.github/workflows/release_publish_docker-image-debian.yml @@ -0,0 +1,65 @@ +name: Publish Docker Debian image + +on: + release: + types: + - released + - prereleased + workflow_dispatch: + +jobs: + push_to_registry: + name: Push Docker debian image to Docker Hub + runs-on: ubuntu-latest + steps: + - + name: Check out the repo + uses: actions/checkout@v3 + - + name: Prepare + id: prep + run: | + DOCKER_IMAGE=crowdsecurity/crowdsec + VERSION=bullseye + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -E 's#/+#-#g') + elif [[ $GITHUB_REF == refs/pull/* ]]; then + VERSION=pr-${{ github.event.number }} + fi
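+ # Example (for illustration only): a tag push of refs/tags/v1.4.2 yields VERSION=v1.4.2; + # a branch push of refs/heads/fix/foo yields VERSION=fix-foo (the sed above turns slashes into dashes); + # a pull request yields VERSION=pr-<number>; otherwise the default bullseye is kept. +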
TAGS="${DOCKER_IMAGE}:${VERSION}-debian" + if [[ ${{ github.event.action }} == released ]]; then + TAGS=$TAGS,${DOCKER_IMAGE}:latest-debian + fi + echo ::set-output name=version::${VERSION} + echo ::set-output name=tags::${TAGS} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + - + name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - + name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - + name: Build and push + uses: docker/build-push-action@v2 + with: + context: . + file: ./Dockerfile.debian + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.prep.outputs.tags }} + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/386 + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} diff --git a/.github/workflows/release_publish_docker-image.yml b/.github/workflows/release_publish_docker-image.yml new file mode 100644 index 0000000..7af40b3 --- /dev/null +++ b/.github/workflows/release_publish_docker-image.yml @@ -0,0 +1,89 @@ +name: Publish Docker image + +on: + release: + types: + - released + - prereleased + +jobs: + push_to_registry: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - + name: Check out the repo + uses: actions/checkout@v3 + - + name: Prepare + id: prep + run: | + DOCKER_IMAGE=crowdsecurity/crowdsec + GHCR_IMAGE=ghcr.io/${{ github.repository_owner }}/crowdsec + VERSION=edge + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -E 's#/+#-#g') + elif [[ $GITHUB_REF == refs/pull/* ]]; then + VERSION=pr-${{ github.event.number }} + fi + TAGS="${DOCKER_IMAGE}:${VERSION},${GHCR_IMAGE}:${VERSION}" + TAGS_SLIM="${DOCKER_IMAGE}:${VERSION}-slim" + if [[ ${{ github.event.action }} == released ]]; then + TAGS=$TAGS,${DOCKER_IMAGE}:latest,${GHCR_IMAGE}:latest + TAGS_SLIM=$TAGS,${DOCKER_IMAGE}:slim + fi + echo ::set-output name=version::${VERSION} + echo ::set-output name=tags::${TAGS} + echo ::set-output name=tags_slim::${TAGS_SLIM} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + - + name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - + name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v1.12.0 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - + name: Build and push slim image + uses: docker/build-push-action@v2 + with: + context: . 
+ file: ./Dockerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.prep.outputs.tags_slim }} + build-args: | + BUILD_ENV=slim + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/386 + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + + - + name: Build and push full image + uses: docker/build-push-action@v2 + with: + context: . + file: ./Dockerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.prep.outputs.tags }} + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/386 + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} diff --git a/.github/workflows/update_docker_hub_doc.yml b/.github/workflows/update_docker_hub_doc.yml new file mode 100644 index 0000000..0a5047d --- /dev/null +++ b/.github/workflows/update_docker_hub_doc.yml @@ -0,0 +1,26 @@ +name: Update Docker Hub README + +on: + push: + branches: + - master + paths: + - 'docker/README.md' + +jobs: + update-docker-hub-readme: + runs-on: ubuntu-latest + steps: + - + name: Check out the repo + uses: actions/checkout@v3 + if: ${{ github.repository_owner == 'crowdsecurity' }} + - + name: Update docker hub README + uses: ms-jpq/sync-dockerhub-readme@v1 + if: ${{ github.repository_owner == 'crowdsecurity' }} + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + repository: crowdsecurity/crowdsec + readme: "./docker/README.md" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..163232a --- /dev/null +++ b/.gitignore @@ -0,0 +1,46 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*~ +.pc +.vscode + +# Test binaries, built with `go test -c` +*.test +*.cover + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Development artifacts, backups, etc +*.swp +*.swo + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# crowdsec binaries +cmd/crowdsec-cli/cscli +cmd/crowdsec/crowdsec +plugins/notifications/http/notification-http +plugins/notifications/slack/notification-slack +plugins/notifications/splunk/notification-splunk +plugins/notifications/email/notification-email +plugins/notifications/dummy/notification-dummy + +#test binaries +pkg/csplugin/tests/cs_plugin_test* + +#test cache (downloaded files) +.cache + +#release stuff +crowdsec-v* +pkg/cwhub/hubdir/.index.json +msi +*.msi +**/*.nupkg +*.tgz diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..0bf1bf5 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,16 @@ +[submodule "tests/lib/bats-core"] + path = tests/lib/bats-core + url = https://github.com/crowdsecurity/bats-core.git + branch = v1.7.0 +[submodule "tests/lib/bats-file"] + path = tests/lib/bats-file + url = https://github.com/crowdsecurity/bats-file.git +[submodule "tests/lib/bats-assert"] + path = tests/lib/bats-assert + url = https://github.com/crowdsecurity/bats-assert.git +[submodule "tests/lib/bats-support"] + path = tests/lib/bats-support + url = https://github.com/crowdsecurity/bats-support.git +[submodule "tests/lib/bats-mock"] + path = tests/lib/bats-mock + url = https://github.com/crowdsecurity/bats-mock.git diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 
index 0000000..3047402 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,221 @@ +# https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml + +run: + skip-dirs: + - pkg/time/rate + skip-files: + - pkg/database/ent/generate.go + - pkg/yamlpatch/merge_test.go + +linters-settings: + gocyclo: + min-complexity: 30 + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: -1 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: -1 + + govet: + check-shadowing: true + + lll: + line-length: 140 + + misspell: + locale: US + + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + + interfacebloat: + max: 12 + +linters: + enable-all: true + disable: + # + # DEPRECATED by golangci-lint + # + - deadcode # The owner seems to have abandoned the linter. Replaced by unused. + - exhaustivestruct # The owner seems to have abandoned the linter. Replaced by exhaustruct. + - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - interfacer # Linter that suggests narrower interface types + - maligned # Tool to detect Go structs that would take less memory if their fields were sorted + - nosnakecase # nosnakecase is a linter that detects snake case in variable and function names. + - scopelint # Scopelint checks for unpinned variables in go programs + - structcheck # The owner seems to have abandoned the linter. Replaced by unused. + - varcheck # The owner seems to have abandoned the linter. Replaced by unused. + + # + # Enabled + # + + # - asasalint # check for pass []any as any in variadic func(...any) + # - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + # - bidichk # Checks for dangerous unicode character sequences + # - decorder # check declaration order and count of types, constants, variables and functions + # - depguard # Go linter that checks if package imports are in a list of acceptable packages + # - dupword # checks for duplicate words in the source code + # - durationcheck # check for two durations multiplied together + # - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + # - exportloopref # checks for pointers to enclosing loop variables + # - funlen # Tool for detection of long functions + # - gochecknoinits # Checks that no init functions are present in Go code + # - gocritic # Provides diagnostics that check for bugs, performance and style issues. + # - goheader # Checks if the file header matches a pattern + # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations.
+ # - goprintffuncname # Checks that printf-like functions are named with `f` at the end + # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code + # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + # - grouper # An analyzer to analyze expression groups. + # - importas # Enforces consistent import aliases + # - ineffassign # Detects when assignments to existing variables are not used + # - interfacebloat # A linter that checks the number of methods inside an interface. + # - logrlint # Check logr arguments. + # - makezero # Finds slice declarations with non-zero initial length + # - misspell # Finds commonly misspelled English words in comments + # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. + # - nolintlint # Reports ill-formed or insufficient nolint directives + # - predeclared # find code that shadows one of Go's predeclared identifiers + # - reassign # Checks that package variables are not reassigned + # - rowserrcheck # checks whether Err of rows is checked successfully + # - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + # - staticcheck # (megacheck): Staticcheck is a go vet on steroids, applying a ton of static analysis checks + # - testableexamples # linter checks if examples are testable (have an expected output) + # - tenv # tenv is an analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + # - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + # - unconvert # Remove unnecessary type conversions + # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types + # - usestdlibvars # A linter that detects the possibility to use variables/constants from the Go standard library. + + # + # Recommended? (easy) + # + + - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions where the check for the returned error can be omitted. + - errorlint # errorlint is a linter that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - exhaustive # check exhaustiveness of enum switch statements + - gci # Gci controls golang package import order and makes it always deterministic. + - godot # Check if comments end in a period + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. + - gosec # (gas): Inspects source code for security problems + - lll # Reports long lines + - nakedret # Finds naked returns in functions greater than a specified function length + - nonamedreturns # Reports all named returns + - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. + - promlinter # Check Prometheus metrics naming via promlint + - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
+    - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+    - wastedassign # wastedassign finds wasted assignment statements.
+    - wrapcheck # Checks that errors returned from external packages are wrapped
+
+    #
+    # Recommended? (requires some work)
+    #
+
+    - bodyclose # checks whether HTTP response body is closed successfully
+    - containedctx # containedctx is a linter that detects struct contained context.Context field
+    - contextcheck # checks whether a function uses a non-inherited context
+    - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.
+    - gomnd # An analyzer to detect magic numbers.
+    - ireturn # Accept Interfaces, Return Concrete Types
+    - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value.
+    - noctx # noctx finds sending http request without context.Context
+    - unparam # Reports unused function parameters
+
+    #
+    # Formatting only, useful in IDE but should not be forced on CI?
+    #
+
+    - gofumpt # Gofumpt checks whether code was gofumpt-ed.
+    - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
+    - whitespace # Tool for detection of leading and trailing whitespace
+    - wsl # Whitespace Linter - Forces you to use empty lines!
+
+    #
+    # Well intended, but not ready for this
+    #
+    - cyclop # checks function and package cyclomatic complexity
+    - dupl # Tool for code clone detection
+    - forcetypeassert # finds forced type assertions
+    - gocognit # Computes and checks the cognitive complexity of functions
+    - gocyclo # Computes and checks the cyclomatic complexity of functions
+    - godox # Tool for detection of FIXME, TODO and other comment keywords
+    - goerr113 # Golang linter to check the error handling expressions
+    - maintidx # maintidx measures the maintainability index of each function.
+    - nestif # Reports deeply nested if statements
+    - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
+    - testpackage # linter that makes you use a separate _test package
+
+    #
+    # Too strict / too many false positives (for now?)
+    #
+    - execinquery # execinquery checks query strings passed to Query functions, reading your Go source files and warning about what it finds
+    - exhaustruct # Checks if all structure fields are initialized
+    - forbidigo # Forbids identifiers
+    - gochecknoglobals # check that no global variables exist
+    - goconst # Finds repeated strings that could be replaced by a constant
+    - stylecheck # Stylecheck is a replacement for golint
+    - tagliatelle # Checks the struct tags.
+ - varnamelen # checks that the length of a variable's name matches its scope + + # + # Under evaluation + # + + - prealloc # Finds slice declarations that could potentially be preallocated + + +issues: + max-issues-per-linter: 0 + max-same-issues: 10 + exclude-rules: + - path: go.mod + text: "replacement are not allowed: golang.org/x/time/rate" + + # `err` is often shadowed, we may continue to do it + - linters: + - govet + text: "shadow: declaration of \"err\" shadows declaration" + + # + # errcheck + # + + - linters: + - errcheck + text: "Error return value of `.*` is not checked" + + # + # gocritic + # + + - linters: + - gocritic + text: "ifElseChain: rewrite if-else to switch statement" + + - linters: + - gocritic + text: "captLocal: `.*' should not be capitalized" + + - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + + - linters: + - gocritic + text: "commentFormatting: put a space between `//` and comment text" diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000..fbdeb80 --- /dev/null +++ b/.yamllint @@ -0,0 +1,43 @@ +--- +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + comments: + level: warning + require-starting-space: true + min-spaces-from-content: 2 + comments-indentation: + level: warning + document-end: disable + document-start: disable + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + hyphens: + max-spaces-after: 1 + indentation: + spaces: consistent + indent-sequences: whatever + check-multi-line-strings: false + key-duplicates: enable + line-length: + max: 180 + allow-non-breakable-words: true + allow-non-breakable-inline-mappings: false + new-line-at-end-of-file: enable + new-lines: + type: unix + trailing-spaces: enable + truthy: disable diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..29baa72 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,3 @@ + +Please refer to [Contributing to CrowdSec](https://doc.crowdsec.net/docs/next/contributing/getting_started). + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..332c05e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,51 @@ +ARG BUILD_ENV=full +ARG GOVERSION=1.19 + +FROM golang:${GOVERSION}-alpine AS build + +WORKDIR /go/src/crowdsec + +COPY . . 
+ +# wizard.sh requires GNU coreutils +RUN apk add --no-cache git gcc libc-dev make bash gettext binutils-gold coreutils && \ + SYSTEM="docker" make release && \ + cd crowdsec-v* && \ + ./wizard.sh --docker-mode && \ + cd - && \ + cscli hub update && \ + cscli collections install crowdsecurity/linux && \ + cscli parsers install crowdsecurity/whitelists + +FROM alpine:latest as build-slim + +RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata yq bash && \ + mkdir -p /staging/etc/crowdsec && \ + mkdir -p /staging/var/lib/crowdsec && \ + mkdir -p /var/lib/crowdsec/data +COPY --from=build /etc/crowdsec /staging/etc/crowdsec +COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec +COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli +COPY --from=build /go/src/crowdsec/docker/docker_start.sh / +COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml + +ENTRYPOINT /bin/bash docker_start.sh + +FROM build-slim as build-plugins +# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp +# The files are here for reference, as users will need to mount a new version to be actually able to use notifications +COPY --from=build /go/src/crowdsec/plugins/notifications/email/email.yaml /staging/etc/crowdsec/notifications/email.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/http/http.yaml /staging/etc/crowdsec/notifications/http.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/slack/slack.yaml /staging/etc/crowdsec/notifications/slack.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/splunk/splunk.yaml /staging/etc/crowdsec/notifications/splunk.yaml +COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins + +FROM build-slim as build-geoip + +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec + +FROM build-plugins as build-full + +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec + +FROM build-${BUILD_ENV} diff --git a/Dockerfile.debian b/Dockerfile.debian new file mode 100644 index 0000000..3ca34d3 --- /dev/null +++ b/Dockerfile.debian @@ -0,0 +1,64 @@ +ARG BUILD_ENV=full +ARG GOVERSION=1.19 + +FROM golang:${GOVERSION}-bullseye AS build + +WORKDIR /go/src/crowdsec + +COPY . . 
+ +# wizard.sh requires GNU coreutils +RUN apt-get update && \ + apt-get install -y git gcc libc-dev make bash gettext binutils-gold coreutils tzdata && \ + SYSTEM="docker" make release && \ + cd crowdsec-v* && \ + ./wizard.sh --docker-mode && \ + cd - && \ + cscli hub update && \ + cscli collections install crowdsecurity/linux && \ + cscli parsers install crowdsecurity/whitelists && \ + go install github.com/mikefarah/yq/v4@latest + +FROM debian:bullseye-slim as build-slim + +RUN apt-get update && \ + apt-get install -y -q --install-recommends --no-install-suggests \ + procps \ + systemd \ + iproute2 \ + ca-certificates \ + bash \ + tzdata && \ + mkdir -p /staging/etc/crowdsec && \ + mkdir -p /staging/var/lib/crowdsec && \ + mkdir -p /var/lib/crowdsec/data + +COPY --from=build /go/bin/yq /usr/local/bin/yq +COPY --from=build /etc/crowdsec /staging/etc/crowdsec +COPY --from=build /usr/local/bin/crowdsec /usr/local/bin/crowdsec +COPY --from=build /usr/local/bin/cscli /usr/local/bin/cscli +COPY --from=build /go/src/crowdsec/docker/docker_start.sh / +COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml +RUN yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml + +ENTRYPOINT /bin/bash docker_start.sh + +FROM build-slim as build-plugins + +# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp +# The files are here for reference, as users will need to mount a new version to be actually able to use notifications +COPY --from=build /go/src/crowdsec/plugins/notifications/email/email.yaml /staging/etc/crowdsec/notifications/email.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/http/http.yaml /staging/etc/crowdsec/notifications/http.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/slack/slack.yaml /staging/etc/crowdsec/notifications/slack.yaml +COPY --from=build /go/src/crowdsec/plugins/notifications/splunk/splunk.yaml /staging/etc/crowdsec/notifications/splunk.yaml +COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins + +FROM build-slim as build-geoip + +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec + +FROM build-plugins as build-full + +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec + +FROM build-${BUILD_ENV} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8a85fc9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020-2022 Crowdsec + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..3ec6703 --- /dev/null +++ b/Makefile @@ -0,0 +1,213 @@ + +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + CS_ROOT = $(shell (Get-Location).Path) + SYSTEM = windows + EXT = .exe +else + CS_ROOT ?= $(shell pwd) + SYSTEM ?= $(shell uname -s | tr '[A-Z]' '[a-z]') +endif + +ifneq ("$(wildcard $(CURDIR)/platform/$(SYSTEM).mk)", "") + include $(CURDIR)/platform/$(SYSTEM).mk +else + include $(CURDIR)/platform/linux.mk +endif + +ifneq ($(OS), Windows_NT) + include $(CS_ROOT)/platform/unix_common.mk +endif + +CROWDSEC_FOLDER = ./cmd/crowdsec +CSCLI_FOLDER = ./cmd/crowdsec-cli/ + +HTTP_PLUGIN_FOLDER = ./plugins/notifications/http +SLACK_PLUGIN_FOLDER = ./plugins/notifications/slack +SPLUNK_PLUGIN_FOLDER = ./plugins/notifications/splunk +EMAIL_PLUGIN_FOLDER = ./plugins/notifications/email +DUMMY_PLUGIN_FOLDER = ./plugins/notifications/dummy + +HTTP_PLUGIN_BIN = notification-http$(EXT) +SLACK_PLUGIN_BIN = notification-slack$(EXT) +SPLUNK_PLUGIN_BIN = notification-splunk$(EXT) +EMAIL_PLUGIN_BIN = notification-email$(EXT) +DUMMY_PLUGIN_BIN= notification-dummy$(EXT) + +HTTP_PLUGIN_CONFIG = http.yaml +SLACK_PLUGIN_CONFIG = slack.yaml +SPLUNK_PLUGIN_CONFIG = splunk.yaml +EMAIL_PLUGIN_CONFIG = email.yaml + +CROWDSEC_BIN = crowdsec$(EXT) +CSCLI_BIN = cscli$(EXT) +BUILD_CMD = build + +MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1 +MINIMUM_SUPPORTED_GO_MINOR_VERSION = 17 +GO_VERSION_VALIDATION_ERR_MSG = Golang version ($(BUILD_GOVERSION)) is not supported, please use at least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION) + +LD_OPTS_VARS= \ +-X 'github.com/crowdsecurity/crowdsec/cmd/crowdsec.bincoverTesting=$(BINCOVER_TESTING)' \ +-X 'github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli.bincoverTesting=$(BINCOVER_TESTING)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/cwversion.Version=$(BUILD_VERSION)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/cwversion.BuildDate=$(BUILD_TIMESTAMP)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/cwversion.Codename=$(BUILD_CODENAME)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/cwversion.Tag=$(BUILD_TAG)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/cwversion.GoVersion=$(BUILD_GOVERSION)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/csconfig.defaultConfigDir=$(DEFAULT_CONFIGDIR)' \ +-X 'github.com/crowdsecurity/crowdsec/pkg/csconfig.defaultDataDir=$(DEFAULT_DATADIR)' + +ifdef BUILD_STATIC + export LD_OPTS=-ldflags "-s -w $(LD_OPTS_VARS) -extldflags '-static'" -tags netgo,osusergo,sqlite_omit_load_extension +else + export LD_OPTS=-ldflags "-s -w $(LD_OPTS_VARS)" +endif + +GOCMD = go +GOTEST = $(GOCMD) test + +RELDIR = crowdsec-$(BUILD_VERSION) + +.PHONY: build +build: goversion crowdsec cscli plugins + +.PHONY: all +all: clean test build + +.PHONY: plugins +plugins: http-plugin slack-plugin splunk-plugin email-plugin dummy-plugin + +.PHONY: goversion +goversion: +ifneq ($(OS), Windows_NT) + @if [ $(GO_MAJOR_VERSION) -gt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + exit 0 ;\ + elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \ + 
echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\
+		exit 1; \
+	fi
+else
+	# This needs Set-ExecutionPolicy -Scope CurrentUser Unrestricted
+	@$(CS_ROOT)/scripts/check_go_version.ps1 $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) $(MINIMUM_SUPPORTED_GO_MINOR_VERSION)
+endif
+
+.PHONY: clean
+clean: testclean
+	@$(MAKE) -C $(CROWDSEC_FOLDER) clean --no-print-directory RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+	@$(MAKE) -C $(CSCLI_FOLDER) clean --no-print-directory RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+	@$(RM) $(CROWDSEC_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) $(CSCLI_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) *.log $(WIN_IGNORE_ERR)
+	@$(RM) crowdsec-release.tgz $(WIN_IGNORE_ERR)
+	@$(RM) crowdsec-release-static.tgz $(WIN_IGNORE_ERR)
+	@$(RM) $(HTTP_PLUGIN_FOLDER)/$(HTTP_PLUGIN_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) $(SLACK_PLUGIN_FOLDER)/$(SLACK_PLUGIN_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) $(SPLUNK_PLUGIN_FOLDER)/$(SPLUNK_PLUGIN_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) $(EMAIL_PLUGIN_FOLDER)/$(EMAIL_PLUGIN_BIN) $(WIN_IGNORE_ERR)
+	@$(RM) $(DUMMY_PLUGIN_FOLDER)/$(DUMMY_PLUGIN_BIN) $(WIN_IGNORE_ERR)
+
+
+cscli: goversion
+	@$(MAKE) -C $(CSCLI_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+cscli-bincover: goversion
+	@GOARCH=$(GOARCH) GOOS=$(GOOS) $(MAKE) -C $(CSCLI_FOLDER) build-bincover --no-print-directory
+
+crowdsec: goversion
+	@$(MAKE) -C $(CROWDSEC_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+crowdsec-bincover: goversion
+	@GOARCH=$(GOARCH) GOOS=$(GOOS) $(MAKE) -C $(CROWDSEC_FOLDER) build-bincover --no-print-directory
+
+http-plugin: goversion
+	@$(MAKE) -C $(HTTP_PLUGIN_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+slack-plugin: goversion
+	@$(MAKE) -C $(SLACK_PLUGIN_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+splunk-plugin: goversion
+	@$(MAKE) -C $(SPLUNK_PLUGIN_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+email-plugin: goversion
+	@$(MAKE) -C $(EMAIL_PLUGIN_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+dummy-plugin: goversion
+	$(MAKE) -C $(DUMMY_PLUGIN_FOLDER) build --no-print-directory GOARCH=$(GOARCH) GOOS=$(GOOS) RM="$(RM)" WIN_IGNORE_ERR="$(WIN_IGNORE_ERR)" CP="$(CP)" CPR="$(CPR)" MKDIR="$(MKDIR)"
+
+.PHONY: testclean
+testclean: bats-clean
+	@$(RM) pkg/apiserver/ent $(WIN_IGNORE_ERR)
+	@$(RM) pkg/cwhub/hubdir $(WIN_IGNORE_ERR)
+	@$(RM) pkg/cwhub/install $(WIN_IGNORE_ERR)
+	@$(RM) pkg/types/example.txt $(WIN_IGNORE_ERR)
+
+.PHONY: test
+test: export AWS_ENDPOINT_FORCE=http://localhost:4566
+test: goversion
+	@echo 'NOTE: You need Docker and docker-compose, and must run "make localstack" in a separate shell ("make localstack-stop" to terminate it)'
+	$(GOTEST) $(LD_OPTS) ./...
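+
+# Note: AWS_ENDPOINT_FORCE above points the AWS-dependent tests (e.g. the
+# cloudwatch and kinesis acquisition modules) at the localstack instance
+# started by the target below.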
+ +.PHONY: localstack +localstack: + docker-compose -f tests/localstack/docker-compose.yml up + +.PHONY: localstack-stop +localstack-stop: + docker-compose -f tests/localstack/docker-compose.yml down + +package-common: + @echo "Building Release to dir $(RELDIR)" + @$(MKDIR) $(RELDIR)/cmd/crowdsec + @$(MKDIR) $(RELDIR)/cmd/crowdsec-cli + @$(MKDIR) $(RELDIR)/$(subst ./,,$(HTTP_PLUGIN_FOLDER)) + @$(MKDIR) $(RELDIR)/$(subst ./,,$(SLACK_PLUGIN_FOLDER)) + @$(MKDIR) $(RELDIR)/$(subst ./,,$(SPLUNK_PLUGIN_FOLDER)) + @$(MKDIR) $(RELDIR)/$(subst ./,,$(EMAIL_PLUGIN_FOLDER)) + + @$(CP) $(CROWDSEC_FOLDER)/$(CROWDSEC_BIN) $(RELDIR)/cmd/crowdsec + @$(CP) $(CSCLI_FOLDER)/$(CSCLI_BIN) $(RELDIR)/cmd/crowdsec-cli + + @$(CP) $(HTTP_PLUGIN_FOLDER)/$(HTTP_PLUGIN_BIN) $(RELDIR)/$(subst ./,,$(HTTP_PLUGIN_FOLDER)) + @$(CP) $(SLACK_PLUGIN_FOLDER)/$(SLACK_PLUGIN_BIN) $(RELDIR)/$(subst ./,,$(SLACK_PLUGIN_FOLDER)) + @$(CP) $(SPLUNK_PLUGIN_FOLDER)/$(SPLUNK_PLUGIN_BIN) $(RELDIR)/$(subst ./,,$(SPLUNK_PLUGIN_FOLDER)) + @$(CP) $(EMAIL_PLUGIN_FOLDER)/$(EMAIL_PLUGIN_BIN) $(RELDIR)/$(subst ./,,$(EMAIL_PLUGIN_FOLDER)) + + @$(CP) $(HTTP_PLUGIN_FOLDER)/$(HTTP_PLUGIN_CONFIG) $(RELDIR)/$(subst ./,,$(HTTP_PLUGIN_FOLDER)) + @$(CP) $(SLACK_PLUGIN_FOLDER)/$(SLACK_PLUGIN_CONFIG) $(RELDIR)/$(subst ./,,$(SLACK_PLUGIN_FOLDER)) + @$(CP) $(SPLUNK_PLUGIN_FOLDER)/$(SPLUNK_PLUGIN_CONFIG) $(RELDIR)/$(subst ./,,$(SPLUNK_PLUGIN_FOLDER)) + @$(CP) $(EMAIL_PLUGIN_FOLDER)/$(EMAIL_PLUGIN_CONFIG) $(RELDIR)/$(subst ./,,$(EMAIL_PLUGIN_FOLDER)) + + @$(CPR) ./config $(RELDIR) + @$(CP) wizard.sh $(RELDIR) + @$(CP) scripts/test_env.sh $(RELDIR) + @$(CP) scripts/test_env.ps1 $(RELDIR) + +.PHONY: package +package: package-common + @tar cvzf crowdsec-release.tgz $(RELDIR) + +.PHONY: check_release +check_release: +ifneq ($(OS), Windows_NT) + @if [ -d $(RELDIR) ]; then echo "$(RELDIR) already exists, abort" ; exit 1 ; fi +else + @if (Test-Path -Path $(RELDIR)) { echo "$(RELDIR) already exists, abort" ; exit 1 ; } +endif + +.PHONY: release +release: check_release build package + +.PHONY: windows_installer +windows_installer: build + @.\make_installer.ps1 -version $(BUILD_VERSION) + +.PHONY: chocolatey +chocolatey: windows_installer + @.\make_chocolatey.ps1 -version $(BUILD_VERSION) + +include tests/bats.mk diff --git a/README.md b/README.md new file mode 100644 index 0000000..b90b5e3 --- /dev/null +++ b/README.md @@ -0,0 +1,162 @@ + +

+# CrowdSec
+
+<!-- logo and badge table (HTML block) not recoverable -->
+
+:computer: Console (WebApp)
+:books: Documentation
+:diamond_shape_with_a_dot_inside: Configuration Hub
+:speech_balloon: Discourse (Forum)
+:speech_balloon: Discord (Live Chat)
+

+
+
+:dancer: This is a community-driven project; we need your feedback.
+
+CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It builds on fail2ban's philosophy but is IPv6 compatible and 60x faster (Go vs Python); it uses Grok patterns to parse logs and YAML scenarios to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM based infrastructures (by decoupling detection and remediation). Once a threat is detected, you can remedy it with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read below for more.
+
+## 2 mins install
+
+Installing it through the [Package system](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) of your OS is the easiest way to proceed.
+Otherwise, you can install it from source.
+
+### From package (Debian)
+
+```sh
+curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.deb.sh | sudo bash
+sudo apt-get update
+sudo apt-get install crowdsec
+```
+
+### From package (rhel/centos/amazon linux)
+
+```sh
+curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.rpm.sh | sudo bash
+sudo yum install crowdsec
+```
+
+### From package (FreeBSD)
+
+```sh
+sudo pkg update
+sudo pkg install crowdsec
+```
+
+### From source
+
+```sh
+wget https://github.com/crowdsecurity/crowdsec/releases/latest/download/crowdsec-release.tgz
+tar xzvf crowdsec-release.tgz
+cd crowdsec-v* && sudo ./wizard.sh -i
+```
+
+## :information_source: About the CrowdSec project
+
+CrowdSec is open-source, lightweight software that detects peers with aggressive behaviors and prevents them from accessing your systems. Its user-friendly design and assistance offer a low technical barrier to entry while still providing a high security gain.
+
+The architecture is as follows:

+[architecture diagram: CrowdSec]
+

+
+Once an unwanted behavior is detected, deal with it through a [bouncer](https://hub.crowdsec.net/browse/#bouncers). The aggressive IP, the scenario triggered and the timestamp are sent for curation, to avoid poisoning and false positives (this can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario.
+
+## Outnumbering hackers all together
+
+By sharing the threats they face, all users protect each other (hence the name Crowd-Security). CrowdSec is designed for modern infrastructures, with its "*Detect Here, Remedy There*" approach, letting you analyse logs coming from several sources in one place and block threats at various levels (application, system, infrastructure) of your stack.
+
+CrowdSec ships by default with scenarios (brute force, port scan, web scan, etc.) adapted for most contexts, but you can easily extend it by picking more of them from the **[HUB](https://hub.crowdsec.net)**. It is also easy to adapt an existing one or create one yourself.
+
+## :point_right: What it is not
+
+CrowdSec is not a SIEM, and it does not store your logs (neither locally nor remotely). Your data is analyzed locally and then forgotten.
+
+Signals sent to the curation platform are limited to the strict minimum: IP, scenario, timestamp. They are only used to let the system spot new rogue IPs and rule out false positives or poisoning attempts.
+
+## :arrow_down: Install it!
+
+CrowdSec is available for various platforms:
+
+ - [Use our Debian repositories](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) or the [official Debian packages](https://packages.debian.org/search?keywords=crowdsec&searchon=names&suite=stable&section=all)
+ - An [image](https://hub.docker.com/r/crowdsecurity/crowdsec) is available for Docker
+ - [Prebuilt release packages](https://github.com/crowdsecurity/crowdsec/releases) are also available (suitable for `amd64`)
+ - You can also [build it from source](https://doc.crowdsec.net/docs/user_guides/building)
+
+Or look directly at the [installation documentation](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) for other methods and platforms.
+
+## :tada: Key benefits
+
+### Fast assisted installation, no technical barrier
+
+Initial configuration is automated, providing a functional out-of-the-box setup
+
+ +### Out of the box detection + +
+Baseline detection is effective out-of-the-box, no fine-tuning required
+
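+
+For instance, you can see what detection capabilities a fresh install ships with (a sketch; output depends on the collections you installed):
+
+```sh
+sudo cscli collections list
+sudo cscli scenarios list
+```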
+ +### Easy bouncer deployment + +
+It's trivial to add bouncers to enforce CrowdSec's decisions
+
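+
+For example, on a Debian-based system the firewall bouncer is one package away (package name as published in the CrowdSec repositories; adjust for your platform):
+
+```sh
+sudo apt install crowdsec-firewall-bouncer-iptables
+```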
+ +### Easy dashboard access + +
+It's easy to deploy a Metabase interface to view your data simply with cscli
+
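+
+A minimal sketch, assuming Docker is available on the host:
+
+```sh
+sudo cscli dashboard setup
+```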
+ +### Hot & Cold logs + +
+Process cold logs for forensics, tests and chasing false positives & false negatives
+
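+
+For example, to replay an old log file through the installed scenarios (a sketch; paths and labels depend on your setup):
+
+```sh
+sudo crowdsec -dsn file:///var/log/auth.log -type syslog -no-api
+```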
+ + +## 📦 About this repository + +This repository contains the code for the two main components of crowdsec : + - `crowdsec` : the daemon a-la-fail2ban that can read, parse, enrich and apply heuristics to logs. This is the component in charge of "detecting" the attacks + - `cscli` : the cli tool mainly used to interact with crowdsec : ban/unban/view current bans, enable/disable parsers and scenarios. + + +## Contributing + +If you wish to contribute to the core of crowdsec, you are welcome to open a PR in this repository. + +If you wish to add a new parser, scenario or collection, please open a PR in the [hub repository](https://github.com/crowdsecurity/hub). + +If you wish to contribute to the documentation, please open a PR in the [documentation repository](http://github.com/crowdsecurity/crowdsec-docs). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..cbc9674 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,31 @@ +# Security Policy + +## Scope + +This security policy applies to : + - Crowdsec agent + - Crowdsec Local API + - Crowdsec bouncers **developed and maintained** by the Crowdsec team [1] + +Reports regarding developements of community members that are not part of the crowdsecurity organization will be thoroughly investigated nonetheless. + +[1] Projects developed and maintained by the Crowdsec team are under the **crowdsecurity** github organization. Bouncers developed by community members that are not part of the Crowdsec organization are explictely excluded. + +## Reporting a Vulnerability + +We are extremely grateful to security researchers and users that report vulnerabilities regarding the Crowdsec project. All reports are thoroughly investigated by members of the Crowdsec organization. + +You can email the private [security@crowdsec.net](mailto:security@crowdsec.net) list with the security details and the details expected for [all Crowdsec bug reports](https://github.com/crowdsecurity/crowdsec/blob/master/.github/ISSUE_TEMPLATE/bug_report.md). + +You may encrypt your email to this list using the GPG key of the [Security team](https://doc.crowdsec.net/docs/next/contact_team). Encryption using GPG is NOT required to make a disclosure. + +## When Should I Report a Vulnerability? + + - You think you discovered a potential security vulnerability in Crowdsec + - You are unsure how a vulnerability affects Crowdsec + - You think you discovered a vulnerability in another project that Crowdsec depends on + +For projects with their own vulnerability reporting and disclosure process, please report it directly there. 
+ + + diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 0000000..74ff97a --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,126 @@ +trigger: + tags: + include: + - "v*" + exclude: + - "v*freebsd" + branches: + exclude: + - "*" +pr: none + +pool: + vmImage: windows-latest + +stages: + - stage: Build + jobs: + - job: + displayName: "Build" + steps: + - task: DotNetCoreCLI@2 + displayName: "Install SignClient" + inputs: + command: 'custom' + custom: 'tool' + arguments: 'install --global SignClient --version 1.3.155' + - task: GoTool@0 + displayName: "Install Go 1.19" + inputs: + version: '1.19' + + - pwsh: | + choco install -y jq + choco install -y make + displayName: "Install builds deps" + - task: PowerShell@2 + inputs: + targetType: 'inline' + pwsh: true + #we are not calling make windows_installer because we want to sign the binaries before they are added to the MSI + script: | + make build + - task: AzureKeyVault@2 + inputs: + azureSubscription: 'Azure subscription 1(8a93ab40-7e99-445e-ad47-0f6a3e2ef546)' + KeyVaultName: 'CodeSigningSecrets' + SecretsFilter: 'CodeSigningUser,CodeSigningPassword' + RunAsPreJob: false + + - task: DownloadSecureFile@1 + inputs: + secureFile: appsettings.json + + - pwsh: | + SignClient.exe Sign --name "crowdsec-binaries" ` + --input "**/*.exe" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` + --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' + displayName: "Sign Crowdsec binaries + plugins" + + - pwsh: | + $build_version=(git describe --tags (git rev-list --tags --max-count=1)).Substring(1) + if ($build_version.Contains("-")) + { + $build_version = $build_version.Substring(0, $build_version.IndexOf("-")) + } + .\make_installer.ps1 -version $build_version + Write-Host "##vso[task.setvariable variable=BuildVersion;isOutput=true]$build_version" + displayName: "Build Crowdsec MSI" + name: BuildMSI + + - pwsh: | + $build_version=(git describe --tags (git rev-list --tags --max-count=1)).Substring(1) + if ($build_version.Contains("-")) + { + $build_version = $build_version.Substring(0, $build_version.IndexOf("-")) + } + .\make_chocolatey.ps1 -version $build_version + displayName: "Build Chocolatey nupkg" + + - pwsh: | + SignClient.exe Sign --name "crowdsec-msi" ` + --input "*.msi" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` + --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' + displayName: "Sign Crowdsec MSI" + + - task: PublishBuildArtifacts@1 + inputs: + PathtoPublish: '$(Build.Repository.LocalPath)\\crowdsec_$(BuildMSI.BuildVersion).msi' + ArtifactName: 'crowdsec.msi' + publishLocation: 'Container' + displayName: "Upload MSI artifact" + + - task: PublishBuildArtifacts@1 + inputs: + PathtoPublish: '$(Build.Repository.LocalPath)\\windows\\Chocolatey\\crowdsec\\crowdsec.$(BuildMSI.BuildVersion).nupkg' + ArtifactName: 'crowdsec.nupkg' + publishLocation: 'Container' + displayName: "Upload nupkg artifact" + - stage: Publish + dependsOn: Build + jobs: + - deployment: "Publish" + displayName: "Publish to GitHub" + environment: github + strategy: + runOnce: + deploy: + steps: + - bash: | + tag=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/crowdsecurity/crowdsec/releases | jq -r '. 
| map(select(.prerelease==true)) | sort_by(.created_at) | reverse | .[0].tag_name') + echo "##vso[task.setvariable variable=LatestPreRelease;isOutput=true]$tag" + name: GetLatestPrelease + - task: GitHubRelease@1 + inputs: + gitHubConnection: "github.com_blotus" + repositoryName: '$(Build.Repository.Name)' + action: 'edit' + tag: '$(GetLatestPrelease.LatestPreRelease)' + assetUploadMode: 'replace' + addChangeLog: false + isPreRelease: true #we force prerelease because the pipeline is invoked on tag creation, which happens when we do a prerelease + #the .. is an ugly hack, but I can't find the var that gives D:\a\1 ... + assets: | + $(Build.ArtifactStagingDirectory)\..\crowdsec.msi/*.msi + $(Build.ArtifactStagingDirectory)\..\crowdsec.nupkg/*.nupkg + condition: ne(variables['GetLatestPrelease.LatestPreRelease'], '') diff --git a/cmd/crowdsec-cli/Makefile b/cmd/crowdsec-cli/Makefile new file mode 100644 index 0000000..2a788b7 --- /dev/null +++ b/cmd/crowdsec-cli/Makefile @@ -0,0 +1,42 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = cscli$(EXT) +# names longer than 15 chars break 'pgrep' +BINARY_NAME_COVER = $(BINARY_NAME).cover +PREFIX ?= "/" +BIN_PREFIX = $(PREFIX)"/usr/local/bin/" + +.PHONY: all +all: clean build + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +build-bincover: clean + $(GOTEST) . -tags testrunmain -coverpkg=$(go list github.com/crowdsecurity/crowdsec/... | grep -v -e 'pkg/database' -e 'plugins/notifications' -e 'pkg/protobufs' -e 'pkg/cwversions' -e 'pkg/cstest' -e 'pkg/models') -covermode=atomic $(LD_OPTS) -c -o $(BINARY_NAME_COVER) + +.PHONY: install +install: install-conf install-bin + +install-conf: + +install-bin: + @install -v -m 755 -D "$(BINARY_NAME)" "$(BIN_PREFIX)/$(BINARY_NAME)" || exit + +uninstall: + @$(RM) $(CSCLI_CONFIG) $(WIN_IGNORE_ERR) + @$(RM) $(BIN_PREFIX)$(BINARY_NAME) $(WIN_IGNORE_ERR) + +clean: + @$(RM) $(BINARY_NAME) $(BINARY_NAME_COVER) $(WIN_IGNORE_ERR) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go new file mode 100644 index 0000000..2ca5b07 --- /dev/null +++ b/cmd/crowdsec-cli/alerts.go @@ -0,0 +1,445 @@ +package main + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "net/url" + "os" + "strconv" + "strings" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +var printMachine bool +var limit *int + +func DecisionsFromAlert(alert *models.Alert) string { + ret := "" + var decMap = make(map[string]int) + for _, decision := range alert.Decisions { + k := *decision.Type + if *decision.Simulated { + k = fmt.Sprintf("(simul)%s", k) + } + v := decMap[k] + decMap[k] = v + 1 + } + for k, v := range decMap { + if len(ret) > 0 { + ret += " " + } + ret += fmt.Sprintf("%s:%d", k, v) + } + return ret +} + +func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + + if csConfig.Cscli.Output == "raw" { + csvwriter := csv.NewWriter(os.Stdout) + header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} + 
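+			// the optional "machine" column is only added when -m/--machine is set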
if printMachine { + header = append(header, "machine") + } + err := csvwriter.Write(header) + if err != nil { + return err + } + for _, alertItem := range *alerts { + row := []string{ + fmt.Sprintf("%d", alertItem.ID), + *alertItem.Source.Scope, + *alertItem.Source.Value, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.GetAsNumberName(), + DecisionsFromAlert(alertItem), + *alertItem.StartAt, + } + if printMachine { + row = append(row, alertItem.MachineID) + } + err := csvwriter.Write(row) + if err != nil { + return err + } + } + csvwriter.Flush() + } else if csConfig.Cscli.Output == "json" { + x, _ := json.MarshalIndent(alerts, "", " ") + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "human" { + if len(*alerts) == 0 { + fmt.Println("No active alerts") + return nil + } + alertsTable(color.Output, alerts, printMachine) + } + return nil +} + +func DisplayOneAlert(alert *models.Alert, withDetail bool) error { + if csConfig.Cscli.Output == "human" { + fmt.Printf("\n################################################################################################\n\n") + scopeAndValue := *alert.Source.Scope + if *alert.Source.Value != "" { + scopeAndValue += ":" + *alert.Source.Value + } + fmt.Printf(" - ID : %d\n", alert.ID) + fmt.Printf(" - Date : %s\n", alert.CreatedAt) + fmt.Printf(" - Machine : %s\n", alert.MachineID) + fmt.Printf(" - Simulation : %v\n", *alert.Simulated) + fmt.Printf(" - Reason : %s\n", *alert.Scenario) + fmt.Printf(" - Events Count : %d\n", *alert.EventsCount) + fmt.Printf(" - Scope:Value: %s\n", scopeAndValue) + fmt.Printf(" - Country : %s\n", alert.Source.Cn) + fmt.Printf(" - AS : %s\n", alert.Source.AsName) + fmt.Printf(" - Begin : %s\n", *alert.StartAt) + fmt.Printf(" - End : %s\n\n", *alert.StopAt) + + alertDecisionsTable(color.Output, alert) + + if withDetail { + fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { + alertEventTable(color.Output, event) + } + } + } + return nil +} + +func NewAlertsCmd() *cobra.Command { + /* ---- ALERTS COMMAND */ + var cmdAlerts = &cobra.Command{ + Use: "alerts [action]", + Short: "Manage alerts", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + var err error + if err := csConfig.LoadAPIClient(); err != nil { + return errors.Wrap(err, "loading api client") + } + apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + if err != nil { + return errors.Wrapf(err, "parsing api url %s", apiURL) + } + Client, err = apiclient.NewClient(&apiclient.Config{ + MachineID: csConfig.API.Client.Credentials.Login, + Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + return errors.Wrap(err, "new api client") + } + return nil + }, + } + + var alertListFilter = apiclient.AlertsListOpts{ + ScopeEquals: new(string), + ValueEquals: new(string), + ScenarioEquals: new(string), + IPEquals: new(string), + RangeEquals: new(string), + Since: new(string), + Until: new(string), + TypeEquals: new(string), + IncludeCAPI: new(bool), + } + limit = new(int) + contained := new(bool) + var cmdAlertsList = &cobra.Command{ + Use: "list [filters]", + Short: "List alerts", + Example: `cscli alerts list +cscli alerts list --ip 1.2.3.4 +cscli alerts list --range 1.2.3.0/24 +cscli alerts list -s crowdsecurity/ssh-bf +cscli alerts list --type ban`, + DisableAutoGenTag: true, + Run: func(cmd 
*cobra.Command, args []string) {
+			var err error
+
+			if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals,
+				alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil {
+				printHelp(cmd)
+				log.Fatalf("%s", err)
+			}
+			if limit != nil {
+				alertListFilter.Limit = limit
+			}
+
+			if *alertListFilter.Until == "" {
+				alertListFilter.Until = nil
+			} else if strings.HasSuffix(*alertListFilter.Until, "d") {
+				/*time.ParseDuration supports hours 'h' as its biggest unit, let's make the user's life easier*/
+				realDuration := strings.TrimSuffix(*alertListFilter.Until, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until)
+				}
+				*alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h")
+			}
+			if *alertListFilter.Since == "" {
+				alertListFilter.Since = nil
+			} else if strings.HasSuffix(*alertListFilter.Since, "d") {
+				/*time.ParseDuration supports hours 'h' as its biggest unit, let's make the user's life easier*/
+				realDuration := strings.TrimSuffix(*alertListFilter.Since, "d")
+				days, err := strconv.Atoi(realDuration)
+				if err != nil {
+					printHelp(cmd)
+					log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since)
+				}
+				*alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h")
+			}
+
+			if *alertListFilter.IncludeCAPI {
+				*alertListFilter.Limit = 0
+			}
+
+			if *alertListFilter.TypeEquals == "" {
+				alertListFilter.TypeEquals = nil
+			}
+			if *alertListFilter.ScopeEquals == "" {
+				alertListFilter.ScopeEquals = nil
+			}
+			if *alertListFilter.ValueEquals == "" {
+				alertListFilter.ValueEquals = nil
+			}
+			if *alertListFilter.ScenarioEquals == "" {
+				alertListFilter.ScenarioEquals = nil
+			}
+			if *alertListFilter.IPEquals == "" {
+				alertListFilter.IPEquals = nil
+			}
+			if *alertListFilter.RangeEquals == "" {
+				alertListFilter.RangeEquals = nil
+			}
+			if contained != nil && *contained {
+				alertListFilter.Contains = new(bool)
+			}
+			alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter)
+			if err != nil {
+				log.Fatalf("Unable to list alerts: %v", err)
+			}
+
+			err = AlertsToTable(alerts, printMachine)
+			if err != nil {
+				log.Fatalf("unable to list alerts: %v", err)
+			}
+		},
+	}
+	cmdAlertsList.Flags().SortFlags = false
+	cmdAlertsList.Flags().BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API")
+	cmdAlertsList.Flags().StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value <IP>)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)")
+	cmdAlertsList.Flags().StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value <RANGE>)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)")
+	cmdAlertsList.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie.
ip,range)") + cmdAlertsList.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + cmdAlertsList.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + cmdAlertsList.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") + cmdAlertsList.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") + cmdAlerts.AddCommand(cmdAlertsList) + + var ActiveDecision *bool + var AlertDeleteAll bool + var delAlertByID string + var alertDeleteFilter = apiclient.AlertsDeleteOpts{ + ScopeEquals: new(string), + ValueEquals: new(string), + ScenarioEquals: new(string), + IPEquals: new(string), + RangeEquals: new(string), + } + var cmdAlertsDelete = &cobra.Command{ + Use: "delete [filters] [--all]", + Short: `Delete alerts +/!\ This command can be use only on the same machine than the local API.`, + Example: `cscli alerts delete --ip 1.2.3.4 +cscli alerts delete --range 1.2.3.0/24 +cscli alerts delete -s crowdsecurity/ssh-bf"`, + DisableAutoGenTag: true, + Aliases: []string{"remove"}, + Args: cobra.ExactArgs(0), + PreRun: func(cmd *cobra.Command, args []string) { + if AlertDeleteAll { + return + } + if *alertDeleteFilter.ScopeEquals == "" && *alertDeleteFilter.ValueEquals == "" && + *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && + *alertDeleteFilter.RangeEquals == "" && delAlertByID == "" { + _ = cmd.Usage() + log.Fatalln("At least one filter or --all must be specified") + } + }, + Run: func(cmd *cobra.Command, args []string) { + var err error + + if !AlertDeleteAll { + if err := manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, + alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + printHelp(cmd) + log.Fatalf("%s", err) + } + if ActiveDecision != nil { + alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + } + + if *alertDeleteFilter.ScopeEquals == "" { + alertDeleteFilter.ScopeEquals = nil + } + if *alertDeleteFilter.ValueEquals == "" { + alertDeleteFilter.ValueEquals = nil + } + if *alertDeleteFilter.ScenarioEquals == "" { + alertDeleteFilter.ScenarioEquals = nil + } + if *alertDeleteFilter.IPEquals == "" { + alertDeleteFilter.IPEquals = nil + } + if *alertDeleteFilter.RangeEquals == "" { + alertDeleteFilter.RangeEquals = nil + } + if contained != nil && *contained { + alertDeleteFilter.Contains = new(bool) + } + limit := 0 + alertDeleteFilter.Limit = &limit + } else { + limit := 0 + alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + } + + var alerts *models.DeleteAlertsResponse + if delAlertByID == "" { + alerts, _, err = Client.Alerts.Delete(context.Background(), alertDeleteFilter) + if err != nil { + log.Fatalf("Unable to delete alerts : %v", err) + } + } else { + alerts, _, err = Client.Alerts.DeleteOne(context.Background(), delAlertByID) + if err != nil { + log.Fatalf("Unable to delete alert : %v", err) + } + } + log.Infof("%s alert(s) deleted", alerts.NbDeleted) + }, + } + cmdAlertsDelete.Flags().SortFlags = false + cmdAlertsDelete.Flags().StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") + cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + cmdAlertsDelete.Flags().StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + cmdAlertsDelete.Flags().StringVar(&delAlertByID, "id", "", "alert ID") + cmdAlertsDelete.Flags().BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + cmdAlertsDelete.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + cmdAlerts.AddCommand(cmdAlertsDelete) + + var details bool + var cmdAlertsInspect = &cobra.Command{ + Use: `inspect "alert_id"`, + Short: `Show info about an alert`, + Example: `cscli alerts inspect 123`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + printHelp(cmd) + return + } + for _, alertID := range args { + id, err := strconv.Atoi(alertID) + if err != nil { + log.Fatalf("bad alert id %s", alertID) + continue + } + alert, _, err := Client.Alerts.GetByID(context.Background(), id) + if err != nil { + log.Fatalf("can't find alert with id %s: %s", alertID, err) + } + switch csConfig.Cscli.Output { + case "human": + if err := DisplayOneAlert(alert, details); err != nil { + continue + } + case "json": + data, err := json.MarshalIndent(alert, "", " ") + if err != nil { + log.Fatalf("unable to marshal alert with id %s: %s", alertID, err) + } + fmt.Printf("%s\n", string(data)) + case "raw": + data, err := yaml.Marshal(alert) + if err != nil { + log.Fatalf("unable to marshal alert with id %s: %s", alertID, err) + } + fmt.Printf("%s\n", string(data)) + } + } + }, + } + cmdAlertsInspect.Flags().SortFlags = false + cmdAlertsInspect.Flags().BoolVarP(&details, "details", "d", false, "show alerts with events") + + cmdAlerts.AddCommand(cmdAlertsInspect) + + var maxItems int + var maxAge string + var cmdAlertsFlush = &cobra.Command{ + Use: `flush`, + Short: `Flush alerts +/!\ This command can be used only on the same machine than the local API`, + Example: `cscli alerts flush --max-items 1000 --max-age 7d`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + if err := csConfig.LoadDBConfig(); err != nil { + log.Fatalf(err.Error()) + } + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + log.Info("Flushing alerts. !! 
This may take a long time !!") + err = dbClient.FlushAlerts(maxAge, maxItems) + if err != nil { + log.Fatalf("unable to flush alerts: %s", err) + } + log.Info("Alerts flushed") + }, + } + + cmdAlertsFlush.Flags().SortFlags = false + cmdAlertsFlush.Flags().IntVar(&maxItems, "max-items", 5000, "Maximum number of alert items to keep in the database") + cmdAlertsFlush.Flags().StringVar(&maxAge, "max-age", "7d", "Maximum age of alert items to keep in the database") + + cmdAlerts.AddCommand(cmdAlertsFlush) + + return cmdAlerts +} diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/alerts_table.go new file mode 100644 index 0000000..6bca95b --- /dev/null +++ b/cmd/crowdsec-cli/alerts_table.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "io" + "sort" + "strconv" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { + t := newTable(out) + t.SetRowLines(false) + header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"} + if printMachine { + header = append(header, "machine") + } + t.SetHeaders(header...) + + for _, alertItem := range *alerts { + displayVal := *alertItem.Source.Scope + if *alertItem.Source.Value != "" { + displayVal += ":" + *alertItem.Source.Value + } + + row := []string{ + strconv.Itoa(int(alertItem.ID)), + displayVal, + *alertItem.Scenario, + alertItem.Source.Cn, + alertItem.Source.GetAsNumberName(), + DecisionsFromAlert(alertItem), + *alertItem.StartAt, + } + + if printMachine { + row = append(row, alertItem.MachineID) + } + + t.AddRow(row...) + } + + t.Render() +} + +func alertDecisionsTable(out io.Writer, alert *models.Alert) { + foundActive := false + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("ID", "scope:value", "action", "expiration", "created_at") + for _, decision := range alert.Decisions { + parsedDuration, err := time.ParseDuration(*decision.Duration) + if err != nil { + log.Errorf(err.Error()) + } + expire := time.Now().UTC().Add(parsedDuration) + if time.Now().UTC().After(expire) { + continue + } + foundActive = true + scopeAndValue := *decision.Scope + if *decision.Value != "" { + scopeAndValue += ":" + *decision.Value + } + t.AddRow( + strconv.Itoa(int(decision.ID)), + scopeAndValue, + *decision.Type, + *decision.Duration, + alert.CreatedAt, + ) + } + if foundActive { + fmt.Printf(" - Active Decisions :\n") + t.Render() // Send output + } +} + +func alertEventTable(out io.Writer, event *models.Event) { + fmt.Fprintf(out, "\n- Date: %s\n", *event.Timestamp) + + t := newTable(out) + t.SetHeaders("Key", "Value") + sort.Slice(event.Meta, func(i, j int) bool { + return event.Meta[i].Key < event.Meta[j].Key + }) + + for _, meta := range event.Meta { + t.AddRow( + meta.Key, + meta.Value, + ) + } + + t.Render() // Send output +} diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go new file mode 100644 index 0000000..ee7426f --- /dev/null +++ b/cmd/crowdsec-cli/bouncers.go @@ -0,0 +1,168 @@ +package main + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/fatih/color" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var keyIP string +var keyLength int +var key string + +func getBouncers(out io.Writer, 
dbClient *database.Client) error { + bouncers, err := dbClient.ListBouncers() + if err != nil { + return fmt.Errorf("unable to list bouncers: %s", err) + } + if csConfig.Cscli.Output == "human" { + getBouncersTable(out, bouncers) + } else if csConfig.Cscli.Output == "json" { + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + if err := enc.Encode(bouncers); err != nil { + return errors.Wrap(err, "failed to unmarshal") + } + return nil + } else if csConfig.Cscli.Output == "raw" { + csvwriter := csv.NewWriter(out) + err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}) + if err != nil { + return errors.Wrap(err, "failed to write raw header") + } + for _, b := range bouncers { + var revoked string + if !b.Revoked { + revoked = "validated" + } else { + revoked = "pending" + } + err := csvwriter.Write([]string{b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType}) + if err != nil { + return errors.Wrap(err, "failed to write raw") + } + } + csvwriter.Flush() + } + return nil +} + +func NewBouncersCmd() *cobra.Command { + /* ---- DECISIONS COMMAND */ + var cmdBouncers = &cobra.Command{ + Use: "bouncers [action]", + Short: "Manage bouncers [requires local API]", + Long: `To list/add/delete bouncers. +Note: This command requires database direct access, so is intended to be run on Local API/master. +`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"bouncer"}, + DisableAutoGenTag: true, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + if err := csConfig.LoadDBConfig(); err != nil { + log.Fatalf(err.Error()) + } + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + } + + var cmdBouncersList = &cobra.Command{ + Use: "list", + Short: "List bouncers", + Long: `List bouncers`, + Example: `cscli bouncers list`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, arg []string) { + err := getBouncers(color.Output, dbClient) + if err != nil { + log.Fatalf("unable to list bouncers: %s", err) + } + }, + } + cmdBouncers.AddCommand(cmdBouncersList) + + var cmdBouncersAdd = &cobra.Command{ + Use: "add MyBouncerName [--length 16]", + Short: "add bouncer", + Long: `add bouncer`, + Example: fmt.Sprintf(`cscli bouncers add MyBouncerName +cscli bouncers add MyBouncerName -l 24 +cscli bouncers add MyBouncerName -k %s`, generatePassword(32)), + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, arg []string) { + keyName := arg[0] + var apiKey string + var err error + if keyName == "" { + log.Fatalf("Please provide a name for the api key") + } + apiKey = key + if key == "" { + apiKey, err = middlewares.GenerateAPIKey(keyLength) + } + if err != nil { + log.Fatalf("unable to generate api key: %s", err) + } + _, err = dbClient.CreateBouncer(keyName, keyIP, middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + if err != nil { + log.Fatalf("unable to create bouncer: %s", err) + } + + if csConfig.Cscli.Output == "human" { + fmt.Printf("Api key for '%s':\n\n", keyName) + fmt.Printf(" %s\n\n", apiKey) + fmt.Print("Please keep this key since you will not be able to retrieve it!\n") + } else if csConfig.Cscli.Output == "raw" { + fmt.Printf("%s", apiKey) + } else if 
csConfig.Cscli.Output == "json" { + j, err := json.Marshal(apiKey) + if err != nil { + log.Fatalf("unable to marshal api key") + } + fmt.Printf("%s", string(j)) + } + }, + } + cmdBouncersAdd.Flags().IntVarP(&keyLength, "length", "l", 16, "length of the api key") + cmdBouncersAdd.Flags().StringVarP(&key, "key", "k", "", "api key for the bouncer") + cmdBouncers.AddCommand(cmdBouncersAdd) + + var cmdBouncersDelete = &cobra.Command{ + Use: "delete MyBouncerName", + Short: "delete bouncer", + Args: cobra.MinimumNArgs(1), + Aliases: []string{"remove"}, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, bouncerID := range args { + err := dbClient.DeleteBouncer(bouncerID) + if err != nil { + log.Fatalf("unable to delete bouncer '%s': %s", bouncerID, err) + } + log.Infof("bouncer '%s' deleted successfully", bouncerID) + } + }, + } + cmdBouncers.AddCommand(cmdBouncersDelete) + return cmdBouncers +} diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go new file mode 100644 index 0000000..0ea725f --- /dev/null +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -0,0 +1,31 @@ +package main + +import ( + "io" + "time" + + "github.com/aquasecurity/table" + "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { + t := newLightTable(out) + t.SetHeaders("Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + for _, b := range bouncers { + var revoked string + if !b.Revoked { + revoked = emoji.CheckMark.String() + } else { + revoked = emoji.Prohibited.String() + } + + t.AddRow(b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType) + } + + t.Render() +} diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go new file mode 100644 index 0000000..e7e2e39 --- /dev/null +++ b/cmd/crowdsec-cli/capi.go @@ -0,0 +1,172 @@ +package main + +import ( + "context" + "fmt" + "net/url" + "os" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +var CAPIURLPrefix string = "v2" +var CAPIBaseURL string = "https://api.crowdsec.net/" +var capiUserPrefix string + +func NewCapiCmd() *cobra.Command { + var cmdCapi = &cobra.Command{ + Use: "capi [action]", + Short: "Manage interaction with Central API (CAPI)", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + return errors.Wrap(err, "Local API is disabled, please run this command on the local API machine") + } + if csConfig.API.Server.OnlineClient == nil { + log.Fatalf("no configuration for Central API in '%s'", *csConfig.FilePath) + } + + return nil + }, + } + + var cmdCapiRegister = &cobra.Command{ + Use: "register", + Short: "Register to Central API (CAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + 
Run: func(cmd *cobra.Command, args []string) {
+			var err error
+			capiUser, err := generateID(capiUserPrefix)
+			if err != nil {
+				log.Fatalf("unable to generate machine id: %s", err)
+			}
+			password := strfmt.Password(generatePassword(passwordLength))
+			apiurl, err := url.Parse(CAPIBaseURL)
+			if err != nil {
+				log.Fatalf("unable to parse api url %s : %s", CAPIBaseURL, err)
+			}
+			_, err = apiclient.RegisterClient(&apiclient.Config{
+				MachineID:     capiUser,
+				Password:      password,
+				UserAgent:     fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+				URL:           apiurl,
+				VersionPrefix: CAPIURLPrefix,
+			}, nil)
+
+			if err != nil {
+				log.Fatalf("api client register ('%s'): %s", CAPIBaseURL, err)
+			}
+			log.Printf("Successfully registered to Central API (CAPI)")
+
+			var dumpFile string
+
+			if outputFile != "" {
+				dumpFile = outputFile
+			} else if csConfig.API.Server.OnlineClient.CredentialsFilePath != "" {
+				dumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath
+			}
+			apiCfg := csconfig.ApiCredentialsCfg{
+				Login:    capiUser,
+				Password: password.String(),
+				URL:      CAPIBaseURL,
+			}
+			apiConfigDump, err := yaml.Marshal(apiCfg)
+			if err != nil {
+				log.Fatalf("unable to marshal api credentials: %s", err)
+			}
+			if dumpFile != "" {
+				err = os.WriteFile(dumpFile, apiConfigDump, 0600)
+				if err != nil {
+					log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
+				}
+				log.Printf("Central API credentials dumped to '%s'", dumpFile)
+			} else {
+				fmt.Printf("%s\n", string(apiConfigDump))
+			}
+
+			log.Warning(ReloadMessage())
+		},
+	}
+	cmdCapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination")
+	cmdCapiRegister.Flags().StringVar(&capiUserPrefix, "schmilblick", "", "set a schmilblick (use in tests only)")
+	if err := cmdCapiRegister.Flags().MarkHidden("schmilblick"); err != nil {
+		log.Fatalf("failed to hide flag: %s", err)
+	}
+	cmdCapi.AddCommand(cmdCapiRegister)
+
+	var cmdCapiStatus = &cobra.Command{
+		Use:               "status",
+		Short:             "Check status with the Central API (CAPI)",
+		Args:              cobra.MinimumNArgs(0),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			var err error
+			if csConfig.API.Server == nil {
+				log.Fatalln("There is no configuration on 'api.server:'")
+			}
+			// OnlineClient is nil here, so its CredentialsFilePath must not be dereferenced
+			if csConfig.API.Server.OnlineClient == nil {
+				log.Fatalf("no configuration for Central API (CAPI) in '%s', please provide credentials first", *csConfig.FilePath)
+			}
+
+			if csConfig.API.Server.OnlineClient.Credentials == nil {
+				log.Fatalf("no credentials for Central API (CAPI) in '%s'", csConfig.API.Server.OnlineClient.CredentialsFilePath)
+			}
+
+			password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password)
+			apiurl, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL)
+			if err != nil {
+				log.Fatalf("parsing api url ('%s'): %s", csConfig.API.Server.OnlineClient.Credentials.URL, err)
+			}
+
+			if err := csConfig.LoadHub(); err != nil {
+				log.Fatal(err)
+			}
+
+			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+				log.Info("Run 'sudo cscli hub update' to get the hub index")
+				log.Fatalf("Failed to load hub index : %s", err)
+			}
+			scenarios, err := cwhub.GetInstalledScenariosAsString()
+			if err != nil {
+				log.Fatalf("failed to get scenarios : %s", err)
+			}
+			if len(scenarios) == 0 {
+				log.Fatalf("no scenarios installed, abort")
+			}
+
+			Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), nil)
+			if err != nil {
+				log.Fatalf("init default client: %s",
err) + } + t := models.WatcherAuthRequest{ + MachineID: &csConfig.API.Server.OnlineClient.Credentials.Login, + Password: &password, + Scenarios: scenarios, + } + log.Infof("Loaded credentials from %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", csConfig.API.Server.OnlineClient.Credentials.Login, apiurl) + _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + log.Fatalf("Failed to authenticate to Central API (CAPI) : %s", err) + } + log.Infof("You can successfully interact with Central API (CAPI)") + }, + } + cmdCapi.AddCommand(cmdCapiStatus) + + return cmdCapi +} diff --git a/cmd/crowdsec-cli/collections.go b/cmd/crowdsec-cli/collections.go new file mode 100644 index 0000000..3e24a58 --- /dev/null +++ b/cmd/crowdsec-cli/collections.go @@ -0,0 +1,183 @@ +package main + +import ( + "fmt" + + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewCollectionsCmd() *cobra.Command { + var cmdCollections = &cobra.Command{ + Use: "collections [action]", + Short: "Manage collections from hub", + Long: `Install/Remove/Upgrade/Inspect collections from the CrowdSec Hub.`, + /*TBD fix help*/ + Args: cobra.MinimumNArgs(1), + Aliases: []string{"collection"}, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if csConfig.Hub == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof(ReloadMessage()) + }, + } + + var ignoreError bool + var cmdCollectionsInstall = &cobra.Command{ + Use: "install collection", + Short: "Install given collection(s)", + Long: `Fetch and install given collection(s) from hub`, + Example: `cscli collections install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cwhub.COLLECTIONS, args, toComplete) + }, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, name := range args { + t := cwhub.GetItem(cwhub.COLLECTIONS, name) + if t == nil { + nearestItem, score := GetDistance(cwhub.COLLECTIONS, name) + Suggest(cwhub.COLLECTIONS, name, nearestItem.Name, score, ignoreError) + continue + } + if err := cwhub.InstallItem(csConfig, name, cwhub.COLLECTIONS, forceAction, downloadOnly); err != nil { + if !ignoreError { + log.Fatalf("Error while installing '%s': %s", name, err) + } + log.Errorf("Error while installing '%s': %s", name, err) + } + } + }, + } + cmdCollectionsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdCollectionsInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files") + cmdCollectionsInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing 
multiple collections") + cmdCollections.AddCommand(cmdCollectionsInstall) + + var cmdCollectionsRemove = &cobra.Command{ + Use: "remove collection", + Short: "Remove given collection(s)", + Long: `Remove given collection(s) from hub`, + Example: `cscli collections remove crowdsec/xxx crowdsec/xyz`, + Aliases: []string{"delete"}, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.COLLECTIONS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.RemoveMany(csConfig, cwhub.COLLECTIONS, "", all, purge, forceAction) + return + } + + if len(args) == 0 { + log.Fatal("Specify at least one collection to remove or '--all' flag.") + } + + for _, name := range args { + if !forceAction { + item := cwhub.GetItem(cwhub.COLLECTIONS, name) + if item == nil { + log.Fatalf("unable to retrieve: %s\n", name) + } + if len(item.BelongsToCollections) > 0 { + log.Warningf("%s belongs to other collections :\n%s\n", name, item.BelongsToCollections) + log.Printf("Run 'sudo cscli collections remove %s --force' if you want to force remove this sub collection\n", name) + continue + } + } + cwhub.RemoveMany(csConfig, cwhub.COLLECTIONS, name, all, purge, forceAction) + } + }, + } + cmdCollectionsRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too") + cmdCollectionsRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files") + cmdCollectionsRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the collections") + cmdCollections.AddCommand(cmdCollectionsRemove) + + var cmdCollectionsUpgrade = &cobra.Command{ + Use: "upgrade collection", + Short: "Upgrade given collection(s)", + Long: `Fetch and upgrade given collection(s) from hub`, + Example: `cscli collections upgrade crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.COLLECTIONS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.UpgradeConfig(csConfig, cwhub.COLLECTIONS, "", forceAction) + } else { + if len(args) == 0 { + log.Fatalf("no target collection to upgrade") + } + for _, name := range args { + cwhub.UpgradeConfig(csConfig, cwhub.COLLECTIONS, name, forceAction) + } + } + }, + } + cmdCollectionsUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the collections") + cmdCollectionsUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdCollections.AddCommand(cmdCollectionsUpgrade) + + var cmdCollectionsInspect = &cobra.Command{ + Use: "inspect collection", + Short: "Inspect given collection", + Long: `Inspect given collection`, + Example: `cscli collections inspect crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.COLLECTIONS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + for _, name := range args { + InspectItem(name, cwhub.COLLECTIONS) + } + }, + } + cmdCollectionsInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "", "Prometheus url") + cmdCollections.AddCommand(cmdCollectionsInspect) + + var 
cmdCollectionsList = &cobra.Command{
+		Use:               "list collection [-a]",
+		Short:             "List all collections",
+		Long:              `List all collections`,
+		Example:           `cscli collections list`,
+		Args:              cobra.ExactArgs(0),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			ListItems(color.Output, []string{cwhub.COLLECTIONS}, args, false, true, all)
+		},
+	}
+	cmdCollectionsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well")
+	cmdCollections.AddCommand(cmdCollectionsList)
+
+	return cmdCollections
+}
diff --git a/cmd/crowdsec-cli/completion.go b/cmd/crowdsec-cli/completion.go
new file mode 100644
index 0000000..c5604d3
--- /dev/null
+++ b/cmd/crowdsec-cli/completion.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func NewCompletionCmd() *cobra.Command {
+
+	var completionCmd = &cobra.Command{
+		Use:   "completion [bash|zsh|powershell|fish]",
+		Short: "Generate completion script",
+		Long: `To load completions:
+
+### Bash:
+` + "```shell" + `
+	$ source <(cscli completion bash)
+
+	# To load completions for each session, execute once:
+
+	# Linux:
+
+	$ cscli completion bash | sudo tee /etc/bash_completion.d/cscli
+	$ source ~/.bashrc
+
+	# macOS:
+
+	$ cscli completion bash | sudo tee /usr/local/etc/bash_completion.d/cscli
+
+	# Troubleshoot:
+	If you get the error (bash: _get_comp_words_by_ref: command not found), you need the "bash-completion" package:
+
+	* Install the bash-completion package
+	$ source /etc/profile
+	$ source <(cscli completion bash)
+` + "```" + `
+
+### Zsh:
+` + "```shell" + `
+	# If shell completion is not already enabled in your environment,
+	# you will need to enable it. You can execute the following once:
+
+	$ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+	# To load completions for each session, execute once:
+
+	$ cscli completion zsh > "${fpath[1]}/_cscli"
+
+	# You will need to start a new shell for this setup to take effect.
+` + "```" + `
+
+### fish:
+` + "```shell" + `
+	$ cscli completion fish | source
+
+	# To load completions for each session, execute once:
+	$ cscli completion fish > ~/.config/fish/completions/cscli.fish
+` + "```" + `
+### PowerShell:
+` + "```powershell" + `
+	PS> cscli completion powershell | Out-String | Invoke-Expression
+
+	# To load completions for every new session, run:
+	PS> cscli completion powershell > cscli.ps1
+	# and source this file from your PowerShell profile.
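+	# e.g. append ". .\cscli.ps1" to your $PROFILE script (illustrative path).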
+` + "```", + DisableFlagsInUseLine: true, + DisableAutoGenTag: true, + ValidArgs: []string{"bash", "zsh", "powershell", "fish"}, + Args: cobra.ExactValidArgs(1), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "powershell": + cmd.Root().GenPowerShellCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + } + }, + } + return completionCmd +} diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go new file mode 100644 index 0000000..063df21 --- /dev/null +++ b/cmd/crowdsec-cli/config.go @@ -0,0 +1,506 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/antonmedv/expr" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +type OldAPICfg struct { + MachineID string `json:"machine_id"` + Password string `json:"password"` +} + +/* Backup crowdsec configurations to directory : + +- Main config (config.yaml) +- Profiles config (profiles.yaml) +- Simulation config (simulation.yaml) +- Backup of API credentials (local API and online API) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +*/ +func backupConfigToDirectory(dirPath string) error { + var err error + + if dirPath == "" { + return fmt.Errorf("directory path can't be empty") + } + log.Infof("Starting configuration backup") + /*if parent directory doesn't exist, bail out. 
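+	(for example, a dirPath of "/tmp/backup/crowdsec" requires "/tmp/backup" to already exist);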
create final dir with Mkdir*/ + parentDir := filepath.Dir(dirPath) + if _, err := os.Stat(parentDir); err != nil { + return errors.Wrapf(err, "while checking parent directory %s existence", parentDir) + } + + if err = os.Mkdir(dirPath, 0700); err != nil { + return errors.Wrapf(err, "while creating %s", dirPath) + } + + if csConfig.ConfigPaths.SimulationFilePath != "" { + backupSimulation := filepath.Join(dirPath, "simulation.yaml") + if err = types.CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { + return errors.Wrapf(err, "failed copy %s to %s", csConfig.ConfigPaths.SimulationFilePath, backupSimulation) + } + log.Infof("Saved simulation to %s", backupSimulation) + } + + /* + - backup AcquisitionFilePath + - backup the other files of acquisition directory + */ + if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" { + backupAcquisition := filepath.Join(dirPath, "acquis.yaml") + if err = types.CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err) + } + } + + acquisBackupDir := filepath.Join(dirPath, "acquis") + if err = os.Mkdir(acquisBackupDir, 0700); err != nil { + return fmt.Errorf("error while creating %s : %s", acquisBackupDir, err) + } + + if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + /*if it was the default one, it was already backup'ed*/ + if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + continue + } + targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) + if err != nil { + return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir) + } + if err = types.CopyFile(acquisFile, targetFname); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + } + log.Infof("Saved acquis %s to %s", acquisFile, targetFname) + } + } + + if ConfigFilePath != "" { + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if err = types.CopyFile(ConfigFilePath, backupMain); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", ConfigFilePath, backupMain, err) + } + log.Infof("Saved default yaml to %s", backupMain) + } + if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if err = types.CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) + } + log.Infof("Saved online API credentials to %s", backupCAPICreds) + } + if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if err = types.CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err) + } + log.Infof("Saved local API credentials to %s", backupLAPICreds) + } + if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" { + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if err = 
types.CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", csConfig.API.Server.ProfilesPath, backupProfiles, err) + } + log.Infof("Saved profiles to %s", backupProfiles) + } + + if err = BackupHub(dirPath); err != nil { + return fmt.Errorf("failed to backup hub config : %s", err) + } + + return nil +} + +/* Restore crowdsec configurations to directory : + +- Main config (config.yaml) +- Profiles config (profiles.yaml) +- Simulation config (simulation.yaml) +- Backup of API credentials (local API and online API) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +*/ +func restoreConfigFromDirectory(dirPath string) error { + var err error + + if !restoreOldBackup { + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if _, err = os.Stat(backupMain); err == nil { + if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { + if err = types.CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err) + } + } + } + + // Now we have config.yaml, we should regenerate config struct to have rights paths etc + ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) + initConfig() + + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupCAPICreds); err == nil { + if err = types.CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) + } + } + + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupLAPICreds); err == nil { + if err = types.CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) + } + } + + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if _, err = os.Stat(backupProfiles); err == nil { + if err = types.CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err) + } + } + } else { + var oldAPICfg OldAPICfg + backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath) + + jsonFile, err := os.Open(backupOldAPICfg) + if err != nil { + log.Warningf("failed to open %s : %s", backupOldAPICfg, err) + } else { + byteValue, _ := io.ReadAll(jsonFile) + err = json.Unmarshal(byteValue, &oldAPICfg) + if err != nil { + return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err) + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: oldAPICfg.MachineID, + Password: oldAPICfg.Password, + URL: CAPIBaseURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to dump api credentials: %s", err) + } + apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir) + if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath + } + err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0644) + if err != nil { + return 
fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err) + } + log.Infof("Saved API credentials to %s", apiConfigDumpFile) + } + } + + backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) + if _, err = os.Stat(backupSimulation); err == nil { + if err = types.CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + } + } + + /*if there is a acquisition dir, restore its content*/ + if csConfig.Crowdsec.AcquisitionDirPath != "" { + if err = os.Mkdir(csConfig.Crowdsec.AcquisitionDirPath, 0700); err != nil { + return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err) + } + + } + + //if there was a single one + backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) + if _, err = os.Stat(backupAcquisition); err == nil { + log.Debugf("restoring backup'ed %s", backupAcquisition) + if err = types.CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + } + } + + //if there is files in the acquis backup dir, restore them + acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") + if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { + for _, acquisFile := range acquisFiles { + targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) + if err != nil { + return errors.Wrapf(err, "while saving %s to %s", acquisFile, targetFname) + } + log.Debugf("restoring %s to %s", acquisFile, targetFname) + if err = types.CopyFile(acquisFile, targetFname); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + } + } + } + + if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + log.Infof("backup filepath from dir -> %s", acquisFile) + /*if it was the default one, it was already backup'ed*/ + if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + log.Infof("skip this one") + continue + } + targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) + if err != nil { + return errors.Wrapf(err, "while saving %s to %s", acquisFile, acquisBackupDir) + } + if err = types.CopyFile(acquisFile, targetFname); err != nil { + return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + } + log.Infof("Saved acquis %s to %s", acquisFile, targetFname) + } + } + + if err = RestoreHub(dirPath); err != nil { + return fmt.Errorf("failed to restore hub config : %s", err) + } + + return nil +} + +func NewConfigCmd() *cobra.Command { + + var cmdConfig = &cobra.Command{ + Use: "config [command]", + Short: "Allows to view current config", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + } + var key string + type Env struct { + Config *csconfig.Config + } + var cmdConfigShow = &cobra.Command{ + Use: "show", + Short: "Displays current config", + Long: `Displays the current cli configuration.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + + if key != "" { + program, err := expr.Compile(key, expr.Env(Env{})) + if err != nil { + log.Fatal(err) + } + output, err := expr.Run(program, Env{Config: csConfig}) + if err != nil { + log.Fatal(err) + } + switch csConfig.Cscli.Output { + case 
"human", "raw": + switch output.(type) { + case string: + fmt.Printf("%s\n", output) + case int: + fmt.Printf("%d\n", output) + default: + fmt.Printf("%v\n", output) + } + case "json": + data, err := json.MarshalIndent(output, "", " ") + if err != nil { + log.Fatalf("failed to marshal configuration: %s", err) + } + fmt.Printf("%s\n", string(data)) + } + return + } + + switch csConfig.Cscli.Output { + case "human": + fmt.Printf("Global:\n") + if csConfig.ConfigPaths != nil { + fmt.Printf(" - Configuration Folder : %s\n", csConfig.ConfigPaths.ConfigDir) + fmt.Printf(" - Data Folder : %s\n", csConfig.ConfigPaths.DataDir) + fmt.Printf(" - Hub Folder : %s\n", csConfig.ConfigPaths.HubDir) + fmt.Printf(" - Simulation File : %s\n", csConfig.ConfigPaths.SimulationFilePath) + } + if csConfig.Common != nil { + fmt.Printf(" - Log Folder : %s\n", csConfig.Common.LogDir) + fmt.Printf(" - Log level : %s\n", csConfig.Common.LogLevel) + fmt.Printf(" - Log Media : %s\n", csConfig.Common.LogMedia) + } + if csConfig.Crowdsec != nil { + fmt.Printf("Crowdsec:\n") + fmt.Printf(" - Acquisition File : %s\n", csConfig.Crowdsec.AcquisitionFilePath) + fmt.Printf(" - Parsers routines : %d\n", csConfig.Crowdsec.ParserRoutinesCount) + if csConfig.Crowdsec.AcquisitionDirPath != "" { + fmt.Printf(" - Acquisition Folder : %s\n", csConfig.Crowdsec.AcquisitionDirPath) + } + } + if csConfig.Cscli != nil { + fmt.Printf("cscli:\n") + fmt.Printf(" - Output : %s\n", csConfig.Cscli.Output) + fmt.Printf(" - Hub Branch : %s\n", csConfig.Cscli.HubBranch) + fmt.Printf(" - Hub Folder : %s\n", csConfig.Cscli.HubDir) + } + if csConfig.API != nil { + if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil { + fmt.Printf("API Client:\n") + fmt.Printf(" - URL : %s\n", csConfig.API.Client.Credentials.URL) + fmt.Printf(" - Login : %s\n", csConfig.API.Client.Credentials.Login) + fmt.Printf(" - Credentials File : %s\n", csConfig.API.Client.CredentialsFilePath) + } + if csConfig.API.Server != nil { + fmt.Printf("Local API Server:\n") + fmt.Printf(" - Listen URL : %s\n", csConfig.API.Server.ListenURI) + fmt.Printf(" - Profile File : %s\n", csConfig.API.Server.ProfilesPath) + if csConfig.API.Server.TLS != nil { + if csConfig.API.Server.TLS.CertFilePath != "" { + fmt.Printf(" - Cert File : %s\n", csConfig.API.Server.TLS.CertFilePath) + } + if csConfig.API.Server.TLS.KeyFilePath != "" { + fmt.Printf(" - Key File : %s\n", csConfig.API.Server.TLS.KeyFilePath) + } + if csConfig.API.Server.TLS.CACertPath != "" { + fmt.Printf(" - CA Cert : %s\n", csConfig.API.Server.TLS.CACertPath) + } + if csConfig.API.Server.TLS.CRLPath != "" { + fmt.Printf(" - CRL : %s\n", csConfig.API.Server.TLS.CRLPath) + } + if csConfig.API.Server.TLS.CacheExpiration != nil { + fmt.Printf(" - Cache Expiration : %s\n", csConfig.API.Server.TLS.CacheExpiration) + } + if csConfig.API.Server.TLS.ClientVerification != "" { + fmt.Printf(" - Client Verification : %s\n", csConfig.API.Server.TLS.ClientVerification) + } + if csConfig.API.Server.TLS.AllowedAgentsOU != nil { + for _, ou := range csConfig.API.Server.TLS.AllowedAgentsOU { + fmt.Printf(" - Allowed Agents OU : %s\n", ou) + } + } + if csConfig.API.Server.TLS.AllowedBouncersOU != nil { + for _, ou := range csConfig.API.Server.TLS.AllowedBouncersOU { + fmt.Printf(" - Allowed Bouncers OU : %s\n", ou) + } + } + + } + fmt.Printf(" - Trusted IPs: \n") + for _, ip := range csConfig.API.Server.TrustedIPs { + fmt.Printf(" - %s\n", ip) + } + if csConfig.API.Server.OnlineClient != nil && 
csConfig.API.Server.OnlineClient.Credentials != nil { + fmt.Printf("Central API:\n") + fmt.Printf(" - URL : %s\n", csConfig.API.Server.OnlineClient.Credentials.URL) + fmt.Printf(" - Login : %s\n", csConfig.API.Server.OnlineClient.Credentials.Login) + fmt.Printf(" - Credentials File : %s\n", csConfig.API.Server.OnlineClient.CredentialsFilePath) + } + } + } + if csConfig.DbConfig != nil { + fmt.Printf(" - Database:\n") + fmt.Printf(" - Type : %s\n", csConfig.DbConfig.Type) + switch csConfig.DbConfig.Type { + case "sqlite": + fmt.Printf(" - Path : %s\n", csConfig.DbConfig.DbPath) + case "mysql", "postgresql", "postgres": + fmt.Printf(" - Host : %s\n", csConfig.DbConfig.Host) + fmt.Printf(" - Port : %d\n", csConfig.DbConfig.Port) + fmt.Printf(" - User : %s\n", csConfig.DbConfig.User) + fmt.Printf(" - DB Name : %s\n", csConfig.DbConfig.DbName) + } + if csConfig.DbConfig.Flush != nil { + if *csConfig.DbConfig.Flush.MaxAge != "" { + fmt.Printf(" - Flush age : %s\n", *csConfig.DbConfig.Flush.MaxAge) + } + if *csConfig.DbConfig.Flush.MaxItems != 0 { + fmt.Printf(" - Flush size : %d\n", *csConfig.DbConfig.Flush.MaxItems) + } + } + } + case "json": + data, err := json.MarshalIndent(csConfig, "", " ") + if err != nil { + log.Fatalf("failed to marshal configuration: %s", err) + } + fmt.Printf("%s\n", string(data)) + case "raw": + data, err := yaml.Marshal(csConfig) + if err != nil { + log.Fatalf("failed to marshal configuration: %s", err) + } + fmt.Printf("%s\n", string(data)) + } + }, + } + cmdConfigShow.Flags().StringVar(&key, "key", "", "Display only this value (Config.API.Server.ListenURI)") + cmdConfig.AddCommand(cmdConfigShow) + + var cmdConfigBackup = &cobra.Command{ + Use: `backup "directory"`, + Short: "Backup current config", + Long: `Backup the current crowdsec configuration including : + +- Main config (config.yaml) +- Simulation config (simulation.yaml) +- Profiles config (profiles.yaml) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +- Backup of API credentials (local API and online API)`, + Example: `cscli config backup ./my-backup`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err = cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + if err = backupConfigToDirectory(args[0]); err != nil { + log.Fatalf("Failed to backup configurations: %s", err) + } + }, + } + cmdConfig.AddCommand(cmdConfigBackup) + + var cmdConfigRestore = &cobra.Command{ + Use: `restore "directory"`, + Short: `Restore config in backup "directory"`, + Long: `Restore the crowdsec configuration from specified backup "directory" including: + +- Main config (config.yaml) +- Simulation config (simulation.yaml) +- Profiles config (profiles.yaml) +- List of scenarios, parsers, postoverflows and collections that are up-to-date +- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections +- Backup of API credentials (local API and online API)`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err = cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + 
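// log.Fatalf exits the process, so the update hint above must be printed first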
log.Fatalf("Failed to get Hub index : %v", err) + } + if err := restoreConfigFromDirectory(args[0]); err != nil { + log.Fatalf("failed restoring configurations from %s : %s", args[0], err) + } + }, + } + cmdConfigRestore.PersistentFlags().BoolVar(&restoreOldBackup, "old-backup", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X") + cmdConfig.AddCommand(cmdConfigRestore) + + return cmdConfig +} diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go new file mode 100644 index 0000000..b4ad9db --- /dev/null +++ b/cmd/crowdsec-cli/console.go @@ -0,0 +1,278 @@ +package main + +import ( + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io/fs" + "net/url" + "os" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func NewConsoleCmd() *cobra.Command { + var cmdConsole = &cobra.Command{ + Use: "console [action]", + Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + var fdErr *fs.PathError + if errors.As(err, &fdErr) { + log.Fatalf("Unable to load Local API : %s", fdErr) + } + if err != nil { + log.Fatalf("Unable to load required Local API Configuration : %s", err) + } + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + if csConfig.DisableAPI { + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + if csConfig.API.Server.OnlineClient == nil { + log.Fatalf("No configuration for Central API (CAPI) in '%s'", *csConfig.FilePath) + } + if csConfig.API.Server.OnlineClient.Credentials == nil { + log.Fatal("You must configure Central API (CAPI) with `cscli capi register` before enrolling your instance") + } + return nil + }, + } + + name := "" + overwrite := false + tags := []string{} + + cmdEnroll := &cobra.Command{ + Use: "enroll [enroll-key]", + Short: "Enroll this instance to https://app.crowdsec.net [requires local API]", + Long: ` +Enroll this instance to https://app.crowdsec.net + +You can get your enrollment key by creating an account on https://app.crowdsec.net. 
+After running this command you will need to validate the enrollment in the webapp.`,
+		Example: `cscli console enroll YOUR-ENROLL-KEY
+		cscli console enroll --name [instance_name] YOUR-ENROLL-KEY
+		cscli console enroll --name [instance_name] --tags [tag_1] --tags [tag_2] YOUR-ENROLL-KEY
+`,
+		Args:              cobra.ExactArgs(1),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password)
+			apiURL, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL)
+			if err != nil {
+				log.Fatalf("Could not parse CAPI URL : %s", err)
+			}
+
+			if err := csConfig.LoadHub(); err != nil {
+				log.Fatal(err)
+			}
+
+			if err := cwhub.GetHubIdx(csConfig.Hub); err != nil {
+				log.Info("Run 'sudo cscli hub update' to get the hub index")
+				log.Fatalf("Failed to load hub index : %s", err)
+			}
+
+			scenarios, err := cwhub.GetInstalledScenariosAsString()
+			if err != nil {
+				log.Fatalf("failed to get scenarios : %s", err)
+			}
+
+			if len(scenarios) == 0 {
+				scenarios = make([]string, 0)
+			}
+
+			c, err := apiclient.NewClient(&apiclient.Config{
+				MachineID:     csConfig.API.Server.OnlineClient.Credentials.Login,
+				Password:      password,
+				Scenarios:     scenarios,
+				UserAgent:     fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+				URL:           apiURL,
+				VersionPrefix: "v2",
+			})
+			if err != nil {
+				log.Fatalf("Could not create CAPI client: %s", err)
+			}
+			resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite)
+			if err != nil {
+				log.Fatalf("Could not enroll instance: %s", err)
+			}
+			if resp.Response.StatusCode == 200 && !overwrite {
+				log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll")
+				return
+			}
+
+			SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true)
+			if err := csConfig.API.Server.DumpConsoleConfig(); err != nil {
+				log.Fatalf("failed writing console config : %s", err)
+			}
+			log.Infof("Enabled tainted & manual alerts sharing, see 'cscli console status'.")
+			log.Infof("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.")
+			log.Infof("Please restart crowdsec after accepting the enrollment.")
+		},
+	}
+	cmdEnroll.Flags().StringVarP(&name, "name", "n", "", "Name to display in the console")
+	cmdEnroll.Flags().BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance")
+	cmdEnroll.Flags().StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console")
+	cmdConsole.AddCommand(cmdEnroll)
+
+	var enableAll, disableAll bool
+
+	cmdEnable := &cobra.Command{
+		Use:     "enable [feature-flag]",
+		Short:   "Enable a feature flag",
+		Example: "enable tainted",
+		Long: `
+Enable given information push to the central API, to empower the console.`,
+		ValidArgs:         csconfig.CONSOLE_CONFIGS,
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			if enableAll {
+				SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true)
+				log.Infof("All features have been enabled successfully")
+			} else {
+				if len(args) == 0 {
+					log.Fatalf("You must specify at least one feature to enable")
+				}
+				SetConsoleOpts(args, true)
+				log.Infof("%v have been enabled", args)
+			}
+			if err := csConfig.API.Server.DumpConsoleConfig(); err != nil {
+				log.Fatalf("failed writing console config : %s", err)
+			}
+			log.Infof(ReloadMessage())
+		},
+	}
+	cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all feature flags")
+	cmdConsole.AddCommand(cmdEnable)
+
+	cmdDisable := &cobra.Command{
+		Use:     "disable [feature-flag]",
+		Short:   "Disable a feature flag",
+		Example: "disable tainted",
+		Long: `
+Disable given information push to the central API.`,
+		ValidArgs:         csconfig.CONSOLE_CONFIGS,
+		Args:              cobra.MinimumNArgs(1),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			if disableAll {
+				SetConsoleOpts(csconfig.CONSOLE_CONFIGS, false)
+			} else {
+				SetConsoleOpts(args, false)
+			}
+
+			if err := csConfig.API.Server.DumpConsoleConfig(); err != nil {
+				log.Fatalf("failed writing console config : %s", err)
+			}
+			if disableAll {
+				log.Infof("All features have been disabled")
+			} else {
+				log.Infof("%v have been disabled", args)
+			}
+			log.Infof(ReloadMessage())
+		},
+	}
+	cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all feature flags")
+	cmdConsole.AddCommand(cmdDisable)
+
+	cmdConsoleStatus := &cobra.Command{
+		Use:               "status [feature-flag]",
+		Short:             "Shows status of one or all feature flags",
+		Example:           "status tainted",
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			switch csConfig.Cscli.Output {
+			case "human":
+				cmdConsoleStatusTable(color.Output, *csConfig)
+			case "json":
+				data, err := json.MarshalIndent(csConfig.API.Server.ConsoleConfig, "", "  ")
+				if err != nil {
+					log.Fatalf("failed to marshal configuration: %s", err)
+				}
+				fmt.Printf("%s\n", string(data))
+			case "raw":
+				csvwriter := csv.NewWriter(os.Stdout)
+				err := csvwriter.Write([]string{"option", "enabled"})
+				if err != nil {
+					log.Fatal(err)
+				}
+
+				rows := [][]string{
+					{"share_manual_decisions", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)},
+					{"share_custom", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)},
+					{"share_tainted", fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)},
+				}
+				for _, row := range rows {
+					err = csvwriter.Write(row)
+					if err != nil {
+						log.Fatal(err)
+					}
+				}
+				csvwriter.Flush()
+			}
+		},
+	}
+
+	cmdConsole.AddCommand(cmdConsoleStatus)
+	return cmdConsole
+}
+
+func SetConsoleOpts(args []string, wanted bool) {
+	for _, arg := range args {
+		switch arg {
+		case csconfig.SEND_CUSTOM_SCENARIOS:
+			/*for each flag check if it's already set before setting it*/
+			if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil {
+				if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == wanted {
+					log.Infof("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
+				} else {
+					log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
+					*csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = wanted
+				}
+			} else {
+				log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted)
+				csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(wanted)
+			}
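+		// The two cases below repeat the nil-check/compare/assign pattern above.
+		// A helper could factor it out; minimal sketch (hypothetical, not part of
+		// this file):
+		//
+		//	func setConsoleOpt(name string, opt **bool, wanted bool) {
+		//		if *opt != nil && **opt == wanted {
+		//			log.Infof("%s already set to %t", name, wanted)
+		//			return
+		//		}
+		//		log.Infof("%s set to %t", name, wanted)
+		//		if *opt == nil {
+		//			*opt = types.BoolPtr(wanted)
+		//		} else {
+		//			**opt = wanted
+		//		}
+		//	}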
case csconfig.SEND_TAINTED_SCENARIOS: + /*for each flag check if it's already set before setting it*/ + if csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios != nil { + if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == wanted { + log.Infof("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) + } else { + log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) + *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = wanted + } + } else { + log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) + csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(wanted) + } + case csconfig.SEND_MANUAL_SCENARIOS: + /*for each flag check if it's already set before setting it*/ + if csConfig.API.Server.ConsoleConfig.ShareManualDecisions != nil { + if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == wanted { + log.Infof("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) + } else { + log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) + *csConfig.API.Server.ConsoleConfig.ShareManualDecisions = wanted + } + } else { + log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) + csConfig.API.Server.ConsoleConfig.ShareManualDecisions = types.BoolPtr(wanted) + } + default: + log.Fatalf("unknown flag %s", arg) + } + } + +} diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go new file mode 100644 index 0000000..014ffc9 --- /dev/null +++ b/cmd/crowdsec-cli/console_table.go @@ -0,0 +1,48 @@ +package main + +import ( + "io" + + "github.com/aquasecurity/table" + "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" +) + +func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) { + t := newTable(out) + t.SetRowLines(false) + + t.SetHeaders("Option Name", "Activated", "Description") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + for _, option := range csconfig.CONSOLE_CONFIGS { + switch option { + case csconfig.SEND_CUSTOM_SCENARIOS: + activated := string(emoji.CrossMark) + if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios { + activated = string(emoji.CheckMarkButton) + } + + t.AddRow(option, activated, "Send alerts from custom scenarios to the console") + + case csconfig.SEND_MANUAL_SCENARIOS: + activated := string(emoji.CrossMark) + if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions { + activated = string(emoji.CheckMarkButton) + } + + t.AddRow(option, activated, "Send manual decisions to the console") + + case csconfig.SEND_TAINTED_SCENARIOS: + activated := string(emoji.CrossMark) + if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios { + activated = string(emoji.CheckMarkButton) + } + + t.AddRow(option, activated, "Send alerts from tainted scenarios to the console") + } + } + + t.Render() +} diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go new file mode 100644 index 0000000..899b019 --- /dev/null +++ b/cmd/crowdsec-cli/dashboard.go @@ -0,0 +1,336 @@ +package main + +import ( + "errors" + "fmt" + "math" + "os" + "os/exec" + "os/user" + "path/filepath" + "strconv" + "strings" + "unicode" + + "github.com/AlecAivazis/survey/v2" + "github.com/pbnjay/memory" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/metabase" +) + +var ( + metabaseUser = "crowdsec@crowdsec.net" + metabasePassword string + metabaseDbPath string + metabaseConfigPath string + metabaseConfigFolder = "metabase/" + metabaseConfigFile = 
"metabase.yaml" + /**/ + metabaseListenAddress = "127.0.0.1" + metabaseListenPort = "3000" + metabaseContainerID = "crowdsec-metabase" + crowdsecGroup = "crowdsec" + + forceYes bool + + /*informations needed to setup a random password on user's behalf*/ +) + +func NewDashboardCmd() *cobra.Command { + /* ---- UPDATE COMMAND */ + var cmdDashboard = &cobra.Command{ + Use: "dashboard [command]", + Short: "Manage your metabase dashboard container [requires local API]", + Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics. +Note: This command requires database direct access, so is intended to be run on Local API/master. + `, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Example: ` +cscli dashboard setup +cscli dashboard start +cscli dashboard stop +cscli dashboard remove +`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + if err := metabase.TestAvailability(); err != nil { + log.Fatalf("%s", err) + } + + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + + metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder) + metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile) + if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil { + log.Fatalf(err.Error()) + } + if err := csConfig.LoadDBConfig(); err != nil { + log.Errorf("This command requires direct database access (must be run on the local API machine)") + log.Fatalf(err.Error()) + } + + /* + Old container name was "/crowdsec-metabase" but podman doesn't + allow '/' in container name. We do this check to not break + existing dashboard setup. + */ + if !metabase.IsContainerExist(metabaseContainerID) { + oldContainerID := fmt.Sprintf("/%s", metabaseContainerID) + if metabase.IsContainerExist(oldContainerID) { + metabaseContainerID = oldContainerID + } + } + }, + } + + var force bool + var cmdDashSetup = &cobra.Command{ + Use: "setup", + Short: "Setup a metabase container.", + Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Example: ` +cscli dashboard setup +cscli dashboard setup --listen 0.0.0.0 +cscli dashboard setup -l 0.0.0.0 -p 443 --password + `, + Run: func(cmd *cobra.Command, args []string) { + if metabaseDbPath == "" { + metabaseDbPath = csConfig.ConfigPaths.DataDir + } + + if metabasePassword == "" { + isValid := passwordIsValid(metabasePassword) + for !isValid { + metabasePassword = generatePassword(16) + isValid = passwordIsValid(metabasePassword) + } + } + var answer bool + if valid, err := checkSystemMemory(); err == nil && !valid { + if !forceYes { + prompt := &survey.Confirm{ + Message: "Metabase requires 1-2GB of RAM, your system is below this requirement continue ?", + Default: true, + } + if err := survey.AskOne(prompt, &answer); err != nil { + log.Warnf("unable to ask about RAM check: %s", err) + } + if !answer { + log.Fatal("Unable to continue due to RAM requirement") + } + } else { + log.Warnf("Metabase requires 1-2GB of RAM, your system is below this requirement") + } + } + groupExist := false + dockerGroup, err := user.LookupGroup(crowdsecGroup) + if err == nil { + groupExist = true + } + if !forceYes && !groupExist { + prompt := &survey.Confirm{ + Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group 
called '%s' to the system, is it ok for you?", crowdsecGroup),
+					Default: true,
+				}
+				if err := survey.AskOne(prompt, &answer); err != nil {
+					log.Fatalf("unable to ask to force: %s", err)
+				}
+			}
+			if !answer && !forceYes && !groupExist {
+				log.Fatalf("unable to continue without creating '%s' group", crowdsecGroup)
+			}
+			if !groupExist {
+				groupAddCmd, err := exec.LookPath("groupadd")
+				if err != nil {
+					log.Fatalf("unable to find 'groupadd' command, can't continue")
+				}
+
+				groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
+				if err := groupAdd.Run(); err != nil {
+					log.Fatalf("unable to add group '%s': %s", crowdsecGroup, err)
+				}
+				dockerGroup, err = user.LookupGroup(crowdsecGroup)
+				if err != nil {
+					log.Fatalf("unable to lookup '%s' group: %+v", crowdsecGroup, err)
+				}
+			}
+			intID, err := strconv.Atoi(dockerGroup.Gid)
+			if err != nil {
+				log.Fatalf("unable to convert group ID to int: %s", err)
+			}
+			if err := os.Chown(csConfig.DbConfig.DbPath, 0, intID); err != nil {
+				log.Fatalf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
+			}
+
+			mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID)
+			if err != nil {
+				log.Fatalf(err.Error())
+			}
+
+			if err := mb.DumpConfig(metabaseConfigPath); err != nil {
+				log.Fatalf(err.Error())
+			}
+
+			log.Infof("Metabase is ready")
+			fmt.Println()
+			fmt.Printf("\tURL      : '%s'\n", mb.Config.ListenURL)
+			fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
+			fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
+		},
+	}
+	cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files.")
+	cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container.")
+	cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
+	cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
+	cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
+	//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user")
+	cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
+
+	cmdDashboard.AddCommand(cmdDashSetup)
+
+	var cmdDashStart = &cobra.Command{
+		Use:               "start",
+		Short:             "Start the metabase container.",
+		Long:              `Starts the metabase container using docker.`,
+		Args:              cobra.ExactArgs(0),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
+			if err != nil {
+				log.Fatalf(err.Error())
+			}
+			if err := mb.Container.Start(); err != nil {
+				log.Fatalf("Failed to start metabase container : %s", err)
+			}
+			log.Infof("Started metabase")
+			log.Infof("url : http://%s:%s", metabaseListenAddress, metabaseListenPort)
+		},
+	}
+	cmdDashboard.AddCommand(cmdDashStart)
+
+	var cmdDashStop = &cobra.Command{
+		Use:               "stop",
+		Short:             "Stops the metabase container.",
+		Long:              `Stops the metabase container using docker.`,
+		Args:              cobra.ExactArgs(0),
+		DisableAutoGenTag: true,
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := metabase.StopContainer(metabaseContainerID); err != nil {
+				log.Fatalf("unable to stop container '%s': %s", metabaseContainerID, err)
+			}
+		},
+	}
+	cmdDashboard.AddCommand(cmdDashStop)
+
+	var
cmdDashShowPassword = &cobra.Command{Use: "show-password", + Short: "displays password of metabase.", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + m := metabase.Metabase{} + if err := m.LoadConfig(metabaseConfigPath); err != nil { + log.Fatal(err) + } + log.Printf("'%s'", m.Config.Password) + }, + } + cmdDashboard.AddCommand(cmdDashShowPassword) + + var cmdDashRemove = &cobra.Command{ + Use: "remove", + Short: "removes the metabase container.", + Long: `removes the metabase container using docker.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Example: ` +cscli dashboard remove +cscli dashboard remove --force + `, + Run: func(cmd *cobra.Command, args []string) { + answer := true + if !forceYes { + prompt := &survey.Confirm{ + Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)", + Default: true, + } + if err := survey.AskOne(prompt, &answer); err != nil { + log.Fatalf("unable to ask to force: %s", err) + } + } + if answer { + if metabase.IsContainerExist(metabaseContainerID) { + log.Debugf("Stopping container %s", metabaseContainerID) + if err := metabase.StopContainer(metabaseContainerID); err != nil { + log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err) + } + dockerGroup, err := user.LookupGroup(crowdsecGroup) + if err == nil { // if group exist, remove it + groupDelCmd, err := exec.LookPath("groupdel") + if err != nil { + log.Fatalf("unable to find 'groupdel' command, can't continue") + } + + groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}} + if err := groupDel.Run(); err != nil { + log.Errorf("unable to delete group '%s': %s", dockerGroup, err) + } + } + log.Debugf("Removing container %s", metabaseContainerID) + if err := metabase.RemoveContainer(metabaseContainerID); err != nil { + log.Warningf("unable to remove container '%s': %s", metabaseContainerID, err) + } + log.Infof("container %s stopped & removed", metabaseContainerID) + } + log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir) + if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil { + log.Warningf("failed to remove metabase internal db : %s", err) + } + if force { + if err := metabase.RemoveImageContainer(); err != nil { + if !strings.Contains(err.Error(), "No such image") { + log.Fatalf("removing docker image: %s", err) + } + } + } + } + }, + } + cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image") + cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") + cmdDashboard.AddCommand(cmdDashRemove) + + return cmdDashboard +} + +func passwordIsValid(password string) bool { + hasDigit := false + for _, j := range password { + if unicode.IsDigit(j) { + hasDigit = true + break + } + } + + if !hasDigit || len(password) < 6 { + return false + } + return true + +} + +func checkSystemMemory() (bool, error) { + totMem := memory.TotalMemory() + if totMem == 0 { + return true, errors.New("Unable to get system total memory") + } + if uint64(math.Pow(2, 30)) >= totMem { + return false, nil + } + return true, nil +} diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go new file mode 100644 index 0000000..fab83a7 --- /dev/null +++ b/cmd/crowdsec-cli/decisions.go @@ -0,0 +1,615 @@ +package main + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/fatih/color" 
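+	// strfmt provides the Password type expected by the LAPI client credentials;
+	// csvutil is assumed here to back the CSV handling of `decisions import`.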
+ "github.com/go-openapi/strfmt" + "github.com/jszwec/csvutil" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var Client *apiclient.ApiClient + +var ( + defaultDuration = "4h" + defaultScope = "ip" + defaultType = "ban" + defaultReason = "manual" +) + +func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ + var spamLimit map[string]bool = make(map[string]bool) + var skipped = 0 + + for aIdx := 0; aIdx < len(*alerts); aIdx++ { + alertItem := (*alerts)[aIdx] + newDecisions := make([]*models.Decision, 0) + for _, decisionItem := range alertItem.Decisions { + spamKey := fmt.Sprintf("%t:%s:%s:%s", *decisionItem.Simulated, *decisionItem.Type, *decisionItem.Scope, *decisionItem.Value) + if _, ok := spamLimit[spamKey]; ok { + skipped++ + continue + } + spamLimit[spamKey] = true + newDecisions = append(newDecisions, decisionItem) + } + alertItem.Decisions = newDecisions + } + if csConfig.Cscli.Output == "raw" { + csvwriter := csv.NewWriter(os.Stdout) + header := []string{"id", "source", "ip", "reason", "action", "country", "as", "events_count", "expiration", "simulated", "alert_id"} + if printMachine { + header = append(header, "machine") + } + err := csvwriter.Write(header) + if err != nil { + return err + } + for _, alertItem := range *alerts { + for _, decisionItem := range alertItem.Decisions { + raw := []string{ + fmt.Sprintf("%d", decisionItem.ID), + *decisionItem.Origin, + *decisionItem.Scope + ":" + *decisionItem.Value, + *decisionItem.Scenario, + *decisionItem.Type, + alertItem.Source.Cn, + alertItem.Source.GetAsNumberName(), + fmt.Sprintf("%d", *alertItem.EventsCount), + *decisionItem.Duration, + fmt.Sprintf("%t", *decisionItem.Simulated), + fmt.Sprintf("%d", alertItem.ID), + } + if printMachine { + raw = append(raw, alertItem.MachineID) + } + + err := csvwriter.Write(raw) + if err != nil { + return err + } + } + } + csvwriter.Flush() + } else if csConfig.Cscli.Output == "json" { + x, _ := json.MarshalIndent(alerts, "", " ") + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "human" { + if len(*alerts) == 0 { + fmt.Println("No active decisions") + return nil + } + decisionsTable(color.Output, alerts, printMachine) + if skipped > 0 { + fmt.Printf("%d duplicated entries skipped\n", skipped) + } + } + return nil +} + +func NewDecisionsCmd() *cobra.Command { + /* ---- DECISIONS COMMAND */ + var cmdDecisions = &cobra.Command{ + Use: "decisions [action]", + Short: "Manage decisions", + Long: `Add/List/Delete/Import decisions from LAPI`, + Example: `cscli decisions [action] [filter]`, + Aliases: []string{"decision"}, + /*TBD example*/ + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadAPIClient(); err != nil { + return errors.Wrap(err, "loading api client") + } + password := strfmt.Password(csConfig.API.Client.Credentials.Password) + apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + if err != nil { + return errors.Wrapf(err, "parsing api url %s", csConfig.API.Client.Credentials.URL) + } + Client, err = apiclient.NewClient(&apiclient.Config{ + MachineID: csConfig.API.Client.Credentials.Login, + Password: 
password,
+ UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+ URL: apiurl,
+ VersionPrefix: "v1",
+ })
+ if err != nil {
+ return errors.Wrap(err, "creating api client")
+ }
+ return nil
+ },
+ }
+
+ var filter = apiclient.AlertsListOpts{
+ ValueEquals: new(string),
+ ScopeEquals: new(string),
+ ScenarioEquals: new(string),
+ OriginEquals: new(string),
+ IPEquals: new(string),
+ RangeEquals: new(string),
+ Since: new(string),
+ Until: new(string),
+ TypeEquals: new(string),
+ IncludeCAPI: new(bool),
+ Limit: new(int),
+ }
+ NoSimu := new(bool)
+ contained := new(bool)
+ var printMachine bool
+ var cmdDecisionsList = &cobra.Command{
+ Use: "list [options]",
+ Short: "List decisions from LAPI",
+ Example: `cscli decisions list -i 1.2.3.4
+cscli decisions list -r 1.2.3.0/24
+cscli decisions list -s crowdsecurity/ssh-bf
+cscli decisions list -t ban
+`,
+ Args: cobra.ExactArgs(0),
+ DisableAutoGenTag: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ var err error
+ /*take care of shorthand options*/
+ if err := manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil {
+ log.Fatalf("%s", err)
+ }
+ filter.ActiveDecisionEquals = new(bool)
+ *filter.ActiveDecisionEquals = true
+ if NoSimu != nil && *NoSimu {
+ filter.IncludeSimulated = new(bool)
+ }
+ /* nullify the empty entries to avoid bad filter */
+ if *filter.Until == "" {
+ filter.Until = nil
+ } else if strings.HasSuffix(*filter.Until, "d") {
+ /*time.ParseDuration supports hours 'h' as its biggest unit, so convert days to make the user's life easier*/
+ realDuration := strings.TrimSuffix(*filter.Until, "d")
+ days, err := strconv.Atoi(realDuration)
+ if err != nil {
+ printHelp(cmd)
+ log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until)
+ }
+ *filter.Until = fmt.Sprintf("%d%s", days*24, "h")
+ }
+
+ if *filter.Since == "" {
+ filter.Since = nil
+ } else if strings.HasSuffix(*filter.Since, "d") {
+ /*time.ParseDuration supports hours 'h' as its biggest unit, so convert days to make the user's life easier*/
+ realDuration := strings.TrimSuffix(*filter.Since, "d")
+ days, err := strconv.Atoi(realDuration)
+ if err != nil {
+ printHelp(cmd)
+ log.Fatalf("Can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since)
+ }
+ *filter.Since = fmt.Sprintf("%d%s", days*24, "h")
+ }
+ if *filter.IncludeCAPI {
+ *filter.Limit = 0
+ }
+ if *filter.TypeEquals == "" {
+ filter.TypeEquals = nil
+ }
+ if *filter.ValueEquals == "" {
+ filter.ValueEquals = nil
+ }
+ if *filter.ScopeEquals == "" {
+ filter.ScopeEquals = nil
+ }
+ if *filter.ScenarioEquals == "" {
+ filter.ScenarioEquals = nil
+ }
+ if *filter.IPEquals == "" {
+ filter.IPEquals = nil
+ }
+ if *filter.RangeEquals == "" {
+ filter.RangeEquals = nil
+ }
+
+ if *filter.OriginEquals == "" {
+ filter.OriginEquals = nil
+ }
+
+ if contained != nil && *contained {
+ filter.Contains = new(bool)
+ }
+
+ alerts, _, err := Client.Alerts.List(context.Background(), filter)
+ if err != nil {
+ log.Fatalf("unable to list decisions: %v", err)
+ }
+
+ err = DecisionsToTable(alerts, printMachine)
+ if err != nil {
+ log.Fatalf("unable to display decisions: %v", err)
+ }
+ },
+ }
+ cmdDecisionsList.Flags().SortFlags = false
+ cmdDecisionsList.Flags().BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API")
+ cmdDecisionsList.Flags().StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)")
4h, 30d)") + cmdDecisionsList.Flags().StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + cmdDecisionsList.Flags().StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") + cmdDecisionsList.Flags().StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") + cmdDecisionsList.Flags().StringVar(filter.OriginEquals, "origin", "", "restrict to this origin (ie. lists,CAPI,cscli,cscli-import,crowdsec)") + cmdDecisionsList.Flags().StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") + cmdDecisionsList.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") + cmdDecisionsList.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + cmdDecisionsList.Flags().StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") + cmdDecisionsList.Flags().IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") + cmdDecisionsList.Flags().BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") + cmdDecisionsList.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") + cmdDecisionsList.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + cmdDecisions.AddCommand(cmdDecisionsList) + + var ( + addIP string + addRange string + addDuration string + addValue string + addScope string + addReason string + addType string + ) + + var cmdDecisionsAdd = &cobra.Command{ + Use: "add [options]", + Short: "Add decision to LAPI", + Example: `cscli decisions add --ip 1.2.3.4 +cscli decisions add --range 1.2.3.0/24 +cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha +cscli decisions add --scope username --value foobar +`, + /*TBD : fix long and example*/ + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + var ipRange string + alerts := models.AddAlertsRequest{} + origin := "cscli" + capacity := int32(0) + leakSpeed := "0" + eventsCount := int32(1) + empty := "" + simulated := false + startAt := time.Now().UTC().Format(time.RFC3339) + stopAt := time.Now().UTC().Format(time.RFC3339) + createdAt := time.Now().UTC().Format(time.RFC3339) + + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + log.Fatalf("%s", err) + } + + if addIP != "" { + addValue = addIP + addScope = types.Ip + } else if addRange != "" { + addValue = addRange + addScope = types.Range + } else if addValue == "" { + printHelp(cmd) + log.Fatalf("Missing arguments, a value is required (--ip, --range or --scope and --value)") + } + + if addReason == "" { + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, csConfig.API.Client.Credentials.Login) + } + decision := models.Decision{ + Duration: &addDuration, + Scope: &addScope, + Value: &addValue, + Type: &addType, + Scenario: &addReason, + Origin: &origin, + } + alert := models.Alert{ + Capacity: &capacity, + Decisions: []*models.Decision{&decision}, + Events: []*models.Event{}, + EventsCount: &eventsCount, + Leakspeed: &leakSpeed, + Message: &addReason, + ScenarioHash: &empty, + Scenario: &addReason, + ScenarioVersion: &empty, + Simulated: &simulated, + Source: 
+ alert := models.Alert{
+ Capacity: &capacity,
+ Decisions: []*models.Decision{&decision},
+ Events: []*models.Event{},
+ EventsCount: &eventsCount,
+ Leakspeed: &leakSpeed,
+ Message: &addReason,
+ ScenarioHash: &empty,
+ Scenario: &addReason,
+ ScenarioVersion: &empty,
+ Simulated: &simulated,
+ Source: &models.Source{
+ AsName: empty,
+ AsNumber: empty,
+ Cn: empty,
+ IP: addValue,
+ Range: ipRange,
+ Scope: &addScope,
+ Value: &addValue,
+ },
+ StartAt: &startAt,
+ StopAt: &stopAt,
+ CreatedAt: createdAt,
+ }
+ alerts = append(alerts, &alert)
+
+ _, _, err = Client.Alerts.Add(context.Background(), alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ log.Info("Decision successfully added")
+ },
+ }
+
+ cmdDecisionsAdd.Flags().SortFlags = false
+ cmdDecisionsAdd.Flags().StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value <IP>)")
+ cmdDecisionsAdd.Flags().StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value <RANGE>)")
+ cmdDecisionsAdd.Flags().StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)")
+ cmdDecisionsAdd.Flags().StringVarP(&addValue, "value", "v", "", "The value (ie. --scope username --value foobar)")
+ cmdDecisionsAdd.Flags().StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)")
+ cmdDecisionsAdd.Flags().StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)")
+ cmdDecisionsAdd.Flags().StringVarP(&addType, "type", "t", "ban", "Decision type (ie. ban,captcha,throttle)")
+ cmdDecisions.AddCommand(cmdDecisionsAdd)
+
+ var delFilter = apiclient.DecisionsDeleteOpts{
+ ScopeEquals: new(string),
+ ValueEquals: new(string),
+ TypeEquals: new(string),
+ IPEquals: new(string),
+ RangeEquals: new(string),
+ ScenarioEquals: new(string),
+ }
+ var delDecisionId string
+ var delDecisionAll bool
+ var cmdDecisionsDelete = &cobra.Command{
+ Use: "delete [options]",
+ Short: "Delete decisions",
+ DisableAutoGenTag: true,
+ Aliases: []string{"remove"},
+ Example: `cscli decisions delete -r 1.2.3.0/24
+cscli decisions delete -i 1.2.3.4
+cscli decisions delete --id 42
+cscli decisions delete --type captcha
+`,
+ /*TBD: redo the Long/Example*/
+ PreRun: func(cmd *cobra.Command, args []string) {
+ if delDecisionAll {
+ return
+ }
+ if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" &&
+ *delFilter.TypeEquals == "" && *delFilter.IPEquals == "" &&
+ *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && delDecisionId == "" {
+ cmd.Usage()
+ log.Fatalln("At least one filter or --all must be specified")
+ }
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ var err error
+ var decisions *models.DeleteDecisionResponse
+
+ /*take care of shorthand options*/
+ if err := manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil {
+ log.Fatalf("%s", err)
+ }
+ if *delFilter.ScopeEquals == "" {
+ delFilter.ScopeEquals = nil
+ }
+ if *delFilter.ValueEquals == "" {
+ delFilter.ValueEquals = nil
+ }
+ if *delFilter.ScenarioEquals == "" {
+ delFilter.ScenarioEquals = nil
+ }
+
+ if *delFilter.TypeEquals == "" {
+ delFilter.TypeEquals = nil
+ }
+
+ if *delFilter.IPEquals == "" {
+ delFilter.IPEquals = nil
+ }
+
+ if *delFilter.RangeEquals == "" {
+ delFilter.RangeEquals = nil
+ }
+ if contained != nil && *contained {
+ delFilter.Contains = new(bool)
+ }
+
+ if delDecisionId == "" {
+ decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter)
+ if err != nil {
+ log.Fatalf("unable to delete decisions: %v", err)
+ }
+ } else {
+ if _, err = strconv.Atoi(delDecisionId); err != nil {
+ log.Fatalf("id '%s' is not an integer: %v", delDecisionId, err)
+ }
+ decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionId)
+ if err != nil {
log.Fatalf("Unable to delete decision : %v", err) + } + } + log.Infof("%s decision(s) deleted", decisions.NbDeleted) + }, + } + + cmdDecisionsDelete.Flags().SortFlags = false + cmdDecisionsDelete.Flags().StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + cmdDecisionsDelete.Flags().StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + cmdDecisionsDelete.Flags().StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. ban,captcha)") + cmdDecisionsDelete.Flags().StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + cmdDecisionsDelete.Flags().StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. crowdsecurity/ssh-bf)") + cmdDecisionsDelete.Flags().StringVar(&delDecisionId, "id", "", "decision id") + cmdDecisionsDelete.Flags().BoolVar(&delDecisionAll, "all", false, "delete all decisions") + cmdDecisionsDelete.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + cmdDecisions.AddCommand(cmdDecisionsDelete) + + var ( + importDuration string + importScope string + importReason string + importType string + importFile string + ) + + var cmdDecisionImport = &cobra.Command{ + Use: "import [options]", + Short: "Import decisions from json or csv file", + Long: "expected format :\n" + + "csv : any of duration,origin,reason,scope,type,value, with a header line\n" + + `json : {"duration" : "24h", "origin" : "my-list", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"}`, + DisableAutoGenTag: true, + Example: `decisions.csv : +duration,scope,value +24h,ip,1.2.3.4 + +cscsli decisions import -i decisions.csv + +decisions.json : +[{"duration" : "4h", "scope" : "ip", "type" : "ban", "value" : "1.2.3.4"}] +`, + Run: func(cmd *cobra.Command, args []string) { + if importFile == "" { + log.Fatalf("Please provide a input file containing decisions with -i flag") + } + csvData, err := os.ReadFile(importFile) + if err != nil { + log.Fatalf("unable to open '%s': %s", importFile, err) + } + type decisionRaw struct { + Duration string `csv:"duration,omitempty" json:"duration,omitempty"` + Origin string `csv:"origin,omitempty" json:"origin,omitempty"` + Scenario string `csv:"reason,omitempty" json:"reason,omitempty"` + Scope string `csv:"scope,omitempty" json:"scope,omitempty"` + Type string `csv:"type,omitempty" json:"type,omitempty"` + Value string `csv:"value" json:"value"` + } + var decisionsListRaw []decisionRaw + switch fileFormat := filepath.Ext(importFile); fileFormat { + case ".json": + if err := json.Unmarshal(csvData, &decisionsListRaw); err != nil { + log.Fatalf("unable to unmarshall json: '%s'", err) + } + case ".csv": + if err := csvutil.Unmarshal(csvData, &decisionsListRaw); err != nil { + log.Fatalf("unable to unmarshall csv: '%s'", err) + } + default: + log.Fatalf("file format not supported for '%s'. 
+ decisionsList := make([]*models.Decision, 0)
+ for i, decisionLine := range decisionsListRaw {
+ line := i + 2 // 1-based numbering, plus the csv header line
+ if decisionLine.Value == "" {
+ log.Fatalf("please provide a 'value' on line %d of your csv", line)
+ }
+ /*deal with defaults and cli-override*/
+ if decisionLine.Duration == "" {
+ decisionLine.Duration = defaultDuration
+ log.Debugf("No 'duration' line %d, using default value: '%s'", line, defaultDuration)
+ }
+ if importDuration != "" {
+ decisionLine.Duration = importDuration
+ log.Debugf("'duration' line %d, using supplied value: '%s'", line, importDuration)
+ }
+ decisionLine.Origin = "cscli-import"
+
+ if decisionLine.Scenario == "" {
+ decisionLine.Scenario = defaultReason
+ log.Debugf("No 'reason' line %d, using default value: '%s'", line, decisionLine.Scenario)
+ }
+ if importReason != "" {
+ decisionLine.Scenario = importReason
+ log.Debugf("'reason' line %d, using supplied value: '%s'", line, importReason)
+ }
+ if decisionLine.Type == "" {
+ decisionLine.Type = defaultType
+ log.Debugf("No 'type' line %d, using default value: '%s'", line, decisionLine.Type)
+ }
+ if importType != "" {
+ decisionLine.Type = importType
+ log.Debugf("'type' line %d, using supplied value: '%s'", line, importType)
+ }
+ if decisionLine.Scope == "" {
+ decisionLine.Scope = defaultScope
+ log.Debugf("No 'scope' line %d, using default value: '%s'", line, decisionLine.Scope)
+ }
+ if importScope != "" {
+ decisionLine.Scope = importScope
+ log.Debugf("'scope' line %d, using supplied value: '%s'", line, importScope)
+ }
+ decision := models.Decision{
+ Value: types.StrPtr(decisionLine.Value),
+ Duration: types.StrPtr(decisionLine.Duration),
+ Origin: types.StrPtr(decisionLine.Origin),
+ Scenario: types.StrPtr(decisionLine.Scenario),
+ Type: types.StrPtr(decisionLine.Type),
+ Scope: types.StrPtr(decisionLine.Scope),
+ Simulated: new(bool),
+ }
+ decisionsList = append(decisionsList, &decision)
+ }
+ alerts := models.AddAlertsRequest{}
+ importAlert := models.Alert{
+ CreatedAt: time.Now().UTC().Format(time.RFC3339),
+ Scenario: types.StrPtr(fmt.Sprintf("add: %d IPs", len(decisionsList))),
+ Message: types.StrPtr(""),
+ Events: []*models.Event{},
+ Source: &models.Source{
+ Scope: types.StrPtr("cscli/manual-import"),
+ Value: types.StrPtr(""),
+ },
+ StartAt: types.StrPtr(time.Now().UTC().Format(time.RFC3339)),
+ StopAt: types.StrPtr(time.Now().UTC().Format(time.RFC3339)),
+ Capacity: types.Int32Ptr(0),
+ Simulated: types.BoolPtr(false),
+ EventsCount: types.Int32Ptr(int32(len(decisionsList))),
+ Leakspeed: types.StrPtr(""),
+ ScenarioHash: types.StrPtr(""),
+ ScenarioVersion: types.StrPtr(""),
+ Decisions: decisionsList,
+ }
+ alerts = append(alerts, &importAlert)
+
+ if len(decisionsList) > 1000 {
+ log.Infof("You are about to add %d decisions, this may take a while", len(decisionsList))
+ }
+
+ _, _, err = Client.Alerts.Add(context.Background(), alerts)
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Infof("%d decisions successfully imported", len(decisionsList))
+ },
+ }
+
+ cmdDecisionImport.Flags().SortFlags = false
+ cmdDecisionImport.Flags().StringVarP(&importFile, "input", "i", "", "Input file")
+ cmdDecisionImport.Flags().StringVarP(&importDuration, "duration", "d", "", "Decision duration (ie. 1h,4h,30m)")
+ cmdDecisionImport.Flags().StringVar(&importScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)")
+ cmdDecisionImport.Flags().StringVarP(&importReason, "reason", "R", "", "Decision reason (ie. 
scenario-name)") + cmdDecisionImport.Flags().StringVarP(&importType, "type", "t", "", "Decision type (ie. ban,captcha,throttle)") + cmdDecisions.AddCommand(cmdDecisionImport) + + return cmdDecisions +} diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/decisions_table.go new file mode 100644 index 0000000..d8d5e03 --- /dev/null +++ b/cmd/crowdsec-cli/decisions_table.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "io" + "strconv" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { + t := newTable(out) + t.SetRowLines(false) + header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} + if printMachine { + header = append(header, "Machine") + } + t.SetHeaders(header...) + + for _, alertItem := range *alerts { + for _, decisionItem := range alertItem.Decisions { + if *alertItem.Simulated { + *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) + } + row := []string{ + strconv.Itoa(int(decisionItem.ID)), + *decisionItem.Origin, + *decisionItem.Scope + ":" + *decisionItem.Value, + *decisionItem.Scenario, + *decisionItem.Type, + alertItem.Source.Cn, + alertItem.Source.GetAsNumberName(), + strconv.Itoa(int(*alertItem.EventsCount)), + *decisionItem.Duration, + strconv.Itoa(int(alertItem.ID)), + } + + if printMachine { + row = append(row, alertItem.MachineID) + } + + t.AddRow(row...) + } + } + t.Render() +} diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go new file mode 100644 index 0000000..09bb047 --- /dev/null +++ b/cmd/crowdsec-cli/explain.go @@ -0,0 +1,149 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func NewExplainCmd() *cobra.Command { + /* ---- HUB COMMAND */ + var logFile string + var dsn string + var logLine string + var logType string + var opts hubtest.DumpOpts + var err error + + var cmdExplain = &cobra.Command{ + Use: "explain", + Short: "Explain log pipeline", + Long: ` +Explain log pipeline + `, + Example: ` +cscli explain --file ./myfile.log --type nginx +cscli explain --log "Sep 19 18:33:22 scw-d95986 sshd[24347]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=1.2.3.4" --type syslog +cscli explain --dsn "file://myfile.log" --type nginx +tail -n 5 myfile.log | cscli explain --type nginx -f - + `, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + fileInfo, _ := os.Stdin.Stat() + + if logType == "" || (logLine == "" && logFile == "" && dsn == "") { + printHelp(cmd) + fmt.Println() + fmt.Printf("Please provide --type flag\n") + os.Exit(1) + } + + if logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { + log.Fatal("-f - is intended to work with pipes.") + } + + var f *os.File + dir := os.TempDir() + + tmpFile := "" + // we create a temporary log file if a log line/stdin has been provided + if logLine != "" || logFile == "-" { + tmpFile = filepath.Join(dir, "cscli_test_tmp.log") + f, err = os.Create(tmpFile) + if err != nil { + log.Fatal(err) + } + + if logLine != "" { + _, err = f.WriteString(logLine) + if err != nil { + log.Fatal(err) + } + } else if logFile == "-" { + reader := bufio.NewReader(os.Stdin) + errCount := 0 + for { + 
input, err := reader.ReadBytes('\n')
+ if err == io.EOF {
+ break
+ }
+ _, err = f.Write(input)
+ if err != nil {
+ errCount++
+ }
+ }
+ if errCount > 0 {
+ log.Warnf("Failed to write %d lines to tmp file", errCount)
+ }
+ }
+ f.Close()
+ // this is the file that was going to be read by crowdsec anyway
+ logFile = tmpFile
+ }
+
+ if logFile != "" {
+ absolutePath, err := filepath.Abs(logFile)
+ if err != nil {
+ log.Fatalf("unable to get absolute path of '%s': %s", logFile, err)
+ }
+ dsn = fmt.Sprintf("file://%s", absolutePath)
+ lineCount := types.GetLineCountForFile(absolutePath)
+ if lineCount > 100 {
+ log.Warnf("log file contains %d lines. This may take a lot of resources.", lineCount)
+ }
+ }
+
+ if dsn == "" {
+ log.Fatal("no acquisition (--file or --dsn) provided, can't run cscli test.")
+ }
+
+ cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", "./", "-no-api"}
+ crowdsecCmd := exec.Command("crowdsec", cmdArgs...)
+ crowdsecCmd.Dir = dir
+ output, err := crowdsecCmd.CombinedOutput()
+ if err != nil {
+ fmt.Println(string(output))
+ log.Fatalf("failed to run crowdsec for test: %v", err)
+ }
+
+ // rm the temporary log file if only a log line/stdin was provided
+ if tmpFile != "" {
+ if err := os.Remove(tmpFile); err != nil {
+ log.Fatalf("unable to remove tmp log file '%s': %+v", tmpFile, err)
+ }
+ }
+ parserDumpFile := filepath.Join(dir, hubtest.ParserResultFileName)
+ bucketStateDumpFile := filepath.Join(dir, hubtest.BucketPourResultFileName)
+
+ parserDump, err := hubtest.LoadParserDump(parserDumpFile)
+ if err != nil {
+ log.Fatalf("unable to load parser dump result: %s", err)
+ }
+
+ bucketStateDump, err := hubtest.LoadBucketPourDump(bucketStateDumpFile)
+ if err != nil {
+ log.Fatalf("unable to load bucket dump result: %s", err)
+ }
+
+ hubtest.DumpTree(*parserDump, *bucketStateDump, opts)
+ },
+ }
+ cmdExplain.PersistentFlags().StringVarP(&logFile, "file", "f", "", "Log file to test")
+ cmdExplain.PersistentFlags().StringVarP(&dsn, "dsn", "d", "", "DSN to test")
+ cmdExplain.PersistentFlags().StringVarP(&logLine, "log", "l", "", "Log line to test")
+ cmdExplain.PersistentFlags().StringVarP(&logType, "type", "t", "", "Type of the acquisition to test")
+ cmdExplain.PersistentFlags().BoolVarP(&opts.Details, "verbose", "v", false, "Display individual changes")
+ cmdExplain.PersistentFlags().BoolVar(&opts.SkipOk, "failures", false, "Only show failed lines")
+
+ return cmdExplain
+}
diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go
new file mode 100644
index 0000000..6b35eb6
--- /dev/null
+++ b/cmd/crowdsec-cli/hub.go
@@ -0,0 +1,142 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/fatih/color"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+
+ "github.com/crowdsecurity/crowdsec/pkg/cwhub"
+)
+
+func NewHubCmd() *cobra.Command {
+ /* ---- HUB COMMAND */
+ var cmdHub = &cobra.Command{
+ Use: "hub [action]",
+ Short: "Manage Hub",
+ Long: `
+Hub management
+
+List/update parsers/scenarios/postoverflows/collections from [Crowdsec Hub](https://hub.crowdsec.net).
+The Hub is managed by cscli; to get the latest hub files from [Crowdsec Hub](https://hub.crowdsec.net), you need to update. 
+ `, + Example: ` +cscli hub list # List all installed configurations +cscli hub update # Download list of available configurations from the hub + `, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + return nil + }, + } + cmdHub.PersistentFlags().StringVarP(&cwhub.HubBranch, "branch", "b", "", "Use given branch from hub") + + var cmdHubList = &cobra.Command{ + Use: "list [-a]", + Short: "List installed configs", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + //use LocalSync to get warnings about tainted / outdated items + _, warn := cwhub.LocalSync(csConfig.Hub) + for _, v := range warn { + log.Info(v) + } + cwhub.DisplaySummary() + ListItems(color.Output, []string{ + cwhub.COLLECTIONS, cwhub.PARSERS, cwhub.SCENARIOS, cwhub.PARSERS_OVFLW, + }, args, true, false, all) + }, + } + cmdHubList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well") + cmdHub.AddCommand(cmdHubList) + + var cmdHubUpdate = &cobra.Command{ + Use: "update", + Short: "Fetch available configs from hub", + Long: ` +Fetches the [.index.json](https://github.com/crowdsecurity/hub/blob/master/.index.json) file from hub, containing the list of available configs. +`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err := cwhub.UpdateHubIdx(csConfig.Hub); err != nil { + log.Fatalf("Failed to get Hub index : %v", err) + } + //use LocalSync to get warnings about tainted / outdated items + _, warn := cwhub.LocalSync(csConfig.Hub) + for _, v := range warn { + log.Info(v) + } + }, + } + cmdHub.AddCommand(cmdHubUpdate) + + var cmdHubUpgrade = &cobra.Command{ + Use: "upgrade", + Short: "Upgrade all configs installed from hub", + Long: ` +Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. 
+`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + + log.Infof("Upgrading collections") + cwhub.UpgradeConfig(csConfig, cwhub.COLLECTIONS, "", forceAction) + log.Infof("Upgrading parsers") + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS, "", forceAction) + log.Infof("Upgrading scenarios") + cwhub.UpgradeConfig(csConfig, cwhub.SCENARIOS, "", forceAction) + log.Infof("Upgrading postoverflows") + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, "", forceAction) + }, + } + cmdHubUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdHub.AddCommand(cmdHubUpgrade) + return cmdHub +} diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go new file mode 100644 index 0000000..8023e9d --- /dev/null +++ b/cmd/crowdsec-cli/hubtest.go @@ -0,0 +1,537 @@ +package main + +import ( + "encoding/json" + "fmt" + "math" + "os" + "path/filepath" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/enescakir/emoji" + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +var ( + HubTest hubtest.HubTest +) + +func NewHubTestCmd() *cobra.Command { + /* ---- HUB COMMAND */ + var hubPath string + var logType string + var crowdsecPath string + var cscliPath string + + var cmdHubTest = &cobra.Command{ + Use: "hubtest", + Short: "Run functional tests on hub configurations", + Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var err error + HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath) + if err != nil { + log.Fatalf("unable to load hubtest: %+v", err) + } + }, + } + cmdHubTest.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") + cmdHubTest.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") + cmdHubTest.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") + + parsers := []string{} + postoverflows := []string{} + scenarios := []string{} + var ignoreParsers bool + var labels map[string]string + + var cmdHubTestCreate = &cobra.Command{ + Use: "create", + Short: "create [test_name]", + Example: `cscli hubtest create my-awesome-test --type syslog +cscli hubtest create my-nginx-custom-test --type nginx +cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + testName := args[0] + testPath := filepath.Join(HubTest.HubTestPath, testName) + if _, err := os.Stat(testPath); os.IsExist(err) { + log.Fatalf("test '%s' already exists in '%s', exiting", testName, testPath) + } + + if logType == "" { + log.Fatalf("please 
provide a type (--type) for the test") + } + + if err := os.MkdirAll(testPath, os.ModePerm); err != nil { + log.Fatalf("unable to create folder '%s': %+v", testPath, err) + } + + // create empty log file + logFileName := fmt.Sprintf("%s.log", testName) + logFilePath := filepath.Join(testPath, logFileName) + logFile, err := os.Create(logFilePath) + if err != nil { + log.Fatal(err) + } + logFile.Close() + + // create empty parser assertion file + parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) + parserAssertFile, err := os.Create(parserAssertFilePath) + if err != nil { + log.Fatal(err) + } + parserAssertFile.Close() + + // create empty scenario assertion file + scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) + scenarioAssertFile, err := os.Create(scenarioAssertFilePath) + if err != nil { + log.Fatal(err) + } + scenarioAssertFile.Close() + + parsers = append(parsers, "crowdsecurity/syslog-logs") + parsers = append(parsers, "crowdsecurity/dateparse-enrich") + + if len(scenarios) == 0 { + scenarios = append(scenarios, "") + } + + if len(postoverflows) == 0 { + postoverflows = append(postoverflows, "") + } + + configFileData := &hubtest.HubTestItemConfig{ + Parsers: parsers, + Scenarios: scenarios, + PostOVerflows: postoverflows, + LogFile: logFileName, + LogType: logType, + IgnoreParsers: ignoreParsers, + Labels: labels, + } + + configFilePath := filepath.Join(testPath, "config.yaml") + fd, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + log.Fatalf("open: %s", err) + } + data, err := yaml.Marshal(configFileData) + if err != nil { + log.Fatalf("marshal: %s", err) + } + _, err = fd.Write(data) + if err != nil { + log.Fatalf("write: %s", err) + } + if err := fd.Close(); err != nil { + log.Fatalf(" close: %s", err) + } + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) + fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) + fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) + fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) + + }, + } + cmdHubTestCreate.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") + cmdHubTestCreate.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") + cmdHubTestCreate.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") + cmdHubTestCreate.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") + cmdHubTestCreate.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") + cmdHubTest.AddCommand(cmdHubTestCreate) + + var noClean bool + var runAll bool + var forceClean bool + var cmdHubTestRun = &cobra.Command{ + Use: "run", + Short: "run [test_name]", + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if !runAll && len(args) == 0 { + printHelp(cmd) + fmt.Println("Please provide test to run or --all flag") + os.Exit(1) + } + + if runAll { + if err := HubTest.LoadAllTests(); err != nil { + log.Fatalf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := HubTest.LoadTestItem(testName) + if err != nil { + log.Fatalf("unable to load test '%s': %s", 
testName, err) + } + } + } + + for _, test := range HubTest.Tests { + if csConfig.Cscli.Output == "human" { + log.Infof("Running test '%s'", test.Name) + } + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + success := true + testResult := make(map[string]bool) + for _, test := range HubTest.Tests { + if test.AutoGen { + if test.ParserAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) + fmt.Println() + fmt.Println(test.ParserAssert.AutoGenAssertData) + } + if test.ScenarioAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) + fmt.Println() + fmt.Println(test.ScenarioAssert.AutoGenAssertData) + } + if !noClean { + if err := test.Clean(); err != nil { + log.Fatalf("unable to clean test '%s' env: %s", test.Name, err) + } + } + fmt.Printf("\nPlease fill your assert file(s) for test '%s', exiting\n", test.Name) + os.Exit(1) + } + testResult[test.Name] = test.Success + if test.Success { + if csConfig.Cscli.Output == "human" { + log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) + } + if !noClean { + if err := test.Clean(); err != nil { + log.Fatalf("unable to clean test '%s' env: %s", test.Name, err) + } + } + } else { + success = false + cleanTestEnv := false + if csConfig.Cscli.Output == "human" { + if len(test.ParserAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + for _, fail := range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if len(test.ScenarioAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if !forceClean && !noClean { + prompt := &survey.Confirm{ + Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), + Default: true, + } + if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { + log.Fatalf("unable to ask to remove runtime folder: %s", err) + } + } + } + + if cleanTestEnv || forceClean { + if err := test.Clean(); err != nil { + log.Fatalf("unable to clean test '%s' env: %s", test.Name, err) + } + } + } + } + if csConfig.Cscli.Output == "human" { + hubTestResultTable(color.Output, testResult) + } else if csConfig.Cscli.Output == "json" { + jsonResult := make(map[string][]string, 0) + jsonResult["success"] = make([]string, 0) + jsonResult["fail"] = make([]string, 0) + for testName, success := range testResult { + if success { + jsonResult["success"] = append(jsonResult["success"], testName) + } else { + jsonResult["fail"] = append(jsonResult["fail"], testName) + } + } + jsonStr, err := json.Marshal(jsonResult) + if err != nil { + log.Fatalf("unable to json test result: %s", err) + } + fmt.Println(string(jsonStr)) + } + + if !success { + os.Exit(1) + } + }, + } + cmdHubTestRun.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") + cmdHubTestRun.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") + cmdHubTestRun.Flags().BoolVar(&runAll, "all", false, "Run all tests") + cmdHubTest.AddCommand(cmdHubTestRun) + + var cmdHubTestClean = &cobra.Command{ + Use: "clean", + Short: "clean [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + log.Fatalf("unable to load test '%s': %s", testName, err) + } + if err := test.Clean(); err != nil { + log.Fatalf("unable to clean test '%s' env: %s", test.Name, err) + } + } + }, + } + cmdHubTest.AddCommand(cmdHubTestClean) + + var cmdHubTestInfo = &cobra.Command{ + Use: "info", + Short: "info [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + log.Fatalf("unable to load test '%s': %s", testName, err) + } + fmt.Println() + fmt.Printf(" Test name : %s\n", test.Name) + fmt.Printf(" Test path : %s\n", test.Path) + fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) + fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) + fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) + fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) + } + }, + } + cmdHubTest.AddCommand(cmdHubTestInfo) + + var cmdHubTestList = &cobra.Command{ + Use: "list", + Short: "list", + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := HubTest.LoadAllTests(); err != nil { + log.Fatalf("unable to load all tests: %+v", err) + } + + switch csConfig.Cscli.Output { + case "human": + hubTestListTable(color.Output, HubTest.Tests) + case "json": + j, err := json.MarshalIndent(HubTest.Tests, " ", " ") + if err != nil { + log.Fatal(err) + } + fmt.Println(string(j)) + default: + log.Fatalf("only human/json output modes are supported") + } + }, + } + cmdHubTest.AddCommand(cmdHubTestList) + + var showParserCov bool + var showScenarioCov bool + var showOnlyPercent bool + var cmdHubTestCoverage = &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + Run: 
func(cmd *cobra.Command, args []string) { + if err := HubTest.LoadAllTests(); err != nil { + log.Fatalf("unable to load all tests: %+v", err) + } + var err error + scenarioCoverage := []hubtest.ScenarioCoverage{} + parserCoverage := []hubtest.ParserCoverage{} + scenarioCoveragePercent := 0 + parserCoveragePercent := 0 + + // if both are false (flag by default), show both + showAll := !showScenarioCov && !showParserCov + + if showParserCov || showAll { + parserCoverage, err = HubTest.GetParsersCoverage() + if err != nil { + log.Fatalf("while getting parser coverage : %s", err) + } + parserTested := 0 + for _, test := range parserCoverage { + if test.TestsCount > 0 { + parserTested += 1 + } + } + parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) + } + + if showScenarioCov || showAll { + scenarioCoverage, err = HubTest.GetScenariosCoverage() + if err != nil { + log.Fatalf("while getting scenario coverage: %s", err) + } + scenarioTested := 0 + for _, test := range scenarioCoverage { + if test.TestsCount > 0 { + scenarioTested += 1 + } + } + scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) + } + + if showOnlyPercent { + if showAll { + fmt.Printf("parsers=%d%%\nscenarios=%d%%", parserCoveragePercent, scenarioCoveragePercent) + } else if showParserCov { + fmt.Printf("parsers=%d%%", parserCoveragePercent) + } else if showScenarioCov { + fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + } + os.Exit(0) + } + + if csConfig.Cscli.Output == "human" { + if showParserCov || showAll { + hubTestParserCoverageTable(color.Output, parserCoverage) + } + + if showScenarioCov || showAll { + hubTestScenarioCoverageTable(color.Output, scenarioCoverage) + } + fmt.Println() + if showParserCov || showAll { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + if showScenarioCov || showAll { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + } else if csConfig.Cscli.Output == "json" { + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s", dump) + } else { + log.Fatalf("only human/json output modes are supported") + } + + }, + } + cmdHubTestCoverage.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") + cmdHubTestCoverage.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") + cmdHubTestCoverage.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") + cmdHubTest.AddCommand(cmdHubTestCoverage) + + var evalExpression string + var cmdHubTestEval = &cobra.Command{ + Use: "eval", + Short: "eval [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + log.Fatalf("can't load test: %+v", err) + } + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + log.Fatalf("can't load test results from '%s': %+v", test.ParserResultFile, err) + } + output, err := test.ParserAssert.EvalExpression(evalExpression) + if err != nil { + log.Fatalf(err.Error()) + } + fmt.Print(output) + } + }, + } + cmdHubTestEval.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") + 
cmdHubTest.AddCommand(cmdHubTestEval) + + var cmdHubTestExplain = &cobra.Command{ + Use: "explain", + Short: "explain [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + log.Fatalf("can't load test: %+v", err) + } + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + err := test.Run() + if err != nil { + log.Fatalf("running test '%s' failed: %+v", test.Name, err) + } + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + log.Fatalf("unable to load parser result after run: %s", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + err := test.Run() + if err != nil { + log.Fatalf("running test '%s' failed: %+v", test.Name, err) + } + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + log.Fatalf("unable to load scenario result after run: %s", err) + } + } + opts := hubtest.DumpOpts{} + hubtest.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) + } + }, + } + cmdHubTest.AddCommand(cmdHubTestExplain) + + return cmdHubTest +} diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go new file mode 100644 index 0000000..9f28c36 --- /dev/null +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + "io" + + "github.com/aquasecurity/table" + "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func hubTestResultTable(out io.Writer, testResult map[string]bool) { + t := newLightTable(out) + t.SetHeaders("Test", "Result") + t.SetHeaderAlignment(table.AlignLeft) + t.SetAlignment(table.AlignLeft) + + for testName, success := range testResult { + status := emoji.CheckMarkButton.String() + if !success { + status = emoji.CrossMark.String() + } + + t.AddRow(testName, status) + } + + t.Render() +} + +func hubTestListTable(out io.Writer, tests []*hubtest.HubTestItem) { + t := newLightTable(out) + t.SetHeaders("Name", "Path") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft) + + for _, test := range tests { + t.AddRow(test.Name, test.Path) + } + + t.Render() +} + +func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.ParserCoverage) { + t := newLightTable(out) + t.SetHeaders("Parser", "Status", "Number of tests") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + parserTested := 0 + for _, test := range coverage { + status := emoji.RedCircle.String() + if test.TestsCount > 0 { + status = emoji.GreenCircle.String() + parserTested++ + } + t.AddRow(test.Parser, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) + } + + t.Render() +} + +func hubTestScenarioCoverageTable(out io.Writer, coverage []hubtest.ScenarioCoverage) { + t := newLightTable(out) + t.SetHeaders("Scenario", "Status", "Number of tests") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + parserTested := 0 + for _, test := range coverage { + status := emoji.RedCircle.String() + if test.TestsCount > 0 { + status = emoji.GreenCircle.String() + parserTested++ + } + t.AddRow(test.Scenario, status, fmt.Sprintf("%d 
times (across %d tests)", test.TestsCount, len(test.PresentIn)))
+ }
+
+ t.Render()
+}
diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go
new file mode 100644
index 0000000..8c9d718
--- /dev/null
+++ b/cmd/crowdsec-cli/lapi.go
@@ -0,0 +1,174 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "gopkg.in/yaml.v2"
+
+ "github.com/crowdsecurity/crowdsec/pkg/apiclient"
+ "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+ "github.com/crowdsecurity/crowdsec/pkg/cwhub"
+ "github.com/crowdsecurity/crowdsec/pkg/cwversion"
+ "github.com/crowdsecurity/crowdsec/pkg/models"
+)
+
+var LAPIURLPrefix = "v1"
+var lapiUser string
+
+func NewLapiCmd() *cobra.Command {
+ var cmdLapi = &cobra.Command{
+ Use: "lapi [action]",
+ Short: "Manage interaction with Local API (LAPI)",
+ Args: cobra.MinimumNArgs(1),
+ DisableAutoGenTag: true,
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ if err := csConfig.LoadAPIClient(); err != nil {
+ return errors.Wrap(err, "loading api client")
+ }
+ return nil
+ },
+ }
+
+ var cmdLapiRegister = &cobra.Command{
+ Use: "register",
+ Short: "Register a machine to Local API (LAPI)",
+ Long: `Register your machine to the Local API (LAPI).
+Keep in mind the machine needs to be validated by an administrator on the LAPI side to be effective.`,
+ Args: cobra.MinimumNArgs(0),
+ DisableAutoGenTag: true,
+ Run: func(cmd *cobra.Command, args []string) {
+ var err error
+ if lapiUser == "" {
+ lapiUser, err = generateID("")
+ if err != nil {
+ log.Fatalf("unable to generate machine id: %s", err)
+ }
+ }
+ password := strfmt.Password(generatePassword(passwordLength))
+ if apiURL == "" {
+ if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" {
+ apiURL = csConfig.API.Client.Credentials.URL
+ } else {
+ log.Fatalf("No Local API URL. 
Please provide it in your configuration or with the -u parameter") + } + } + /*URL needs to end with /, but user doesn't care*/ + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + /*URL needs to start with http://, but user doesn't care*/ + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { + apiURL = "http://" + apiURL + } + apiurl, err := url.Parse(apiURL) + if err != nil { + log.Fatalf("parsing api url: %s", err) + } + _, err = apiclient.RegisterClient(&apiclient.Config{ + MachineID: lapiUser, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiurl, + VersionPrefix: LAPIURLPrefix, + }, nil) + + if err != nil { + log.Fatalf("api client register: %s", err) + } + + log.Printf("Successfully registered to Local API (LAPI)") + + var dumpFile string + if outputFile != "" { + dumpFile = outputFile + } else if csConfig.API.Client.CredentialsFilePath != "" { + dumpFile = csConfig.API.Client.CredentialsFilePath + } else { + dumpFile = "" + } + apiCfg := csconfig.ApiCredentialsCfg{ + Login: lapiUser, + Password: password.String(), + URL: apiURL, + } + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + log.Fatalf("unable to marshal api credentials: %s", err) + } + if dumpFile != "" { + err = os.WriteFile(dumpFile, apiConfigDump, 0644) + if err != nil { + log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err) + } + log.Printf("Local API credentials dumped to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + log.Warning(ReloadMessage()) + }, + } + cmdLapiRegister.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") + cmdLapiRegister.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") + cmdLapiRegister.Flags().StringVar(&lapiUser, "machine", "", "Name of the machine to register with") + cmdLapi.AddCommand(cmdLapiRegister) + + var cmdLapiStatus = &cobra.Command{ + Use: "status", + Short: "Check authentication to Local API (LAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + + password := strfmt.Password(csConfig.API.Client.Credentials.Password) + apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + login := csConfig.API.Client.Credentials.Login + if err != nil { + log.Fatalf("parsing api url ('%s'): %s", apiurl, err) + } + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to load hub index : %s", err) + } + scenarios, err := cwhub.GetInstalledScenariosAsString() + if err != nil { + log.Fatalf("failed to get scenarios : %s", err) + } + + Client, err = apiclient.NewDefaultClient(apiurl, + LAPIURLPrefix, + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil) + if err != nil { + log.Fatalf("init default client: %s", err) + } + t := models.WatcherAuthRequest{ + MachineID: &login, + Password: &password, + Scenarios: scenarios, + } + log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", login, apiurl) + _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + log.Fatalf("Failed to authenticate to Local API (LAPI) : %s", err) + } else { + log.Infof("You can successfully interact with Local API (LAPI)") + } + }, + } + cmdLapi.AddCommand(cmdLapiStatus) + 
return cmdLapi
+}
diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go
new file mode 100644
index 0000000..5370604
--- /dev/null
+++ b/cmd/crowdsec-cli/machines.go
@@ -0,0 +1,354 @@
+package main
+
+import (
+ saferand "crypto/rand"
+ "encoding/csv"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/AlecAivazis/survey/v2"
+ "github.com/enescakir/emoji"
+ "github.com/fatih/color"
+ "github.com/go-openapi/strfmt"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "gopkg.in/yaml.v2"
+
+ "github.com/crowdsecurity/machineid"
+
+ "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+ "github.com/crowdsecurity/crowdsec/pkg/database"
+ "github.com/crowdsecurity/crowdsec/pkg/database/ent"
+ "github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+var machineID string
+var machinePassword string
+var interactive bool
+var apiURL string
+var outputFile string
+var forceAdd bool
+var autoAdd bool
+
+var (
+ passwordLength = 64
+ upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ lower = "abcdefghijklmnopqrstuvwxyz"
+ digits = "0123456789"
+)
+
+func generatePassword(length int) string {
+ charset := upper + lower + digits
+ charsetLength := len(charset)
+
+ buf := make([]byte, length)
+ for i := 0; i < length; i++ {
+ rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength)))
+ if err != nil {
+ log.Fatalf("failed getting data from prng for password generation: %s", err)
+ }
+ buf[i] = charset[rInt.Int64()]
+ }
+
+ return string(buf)
+}
+
+// Returns a unique identifier for each crowdsec installation, using an
+// identifier of the OS installation where available, otherwise a random
+// string.
+func generateIDPrefix() (string, error) {
+ prefix, err := machineid.ID()
+ if err == nil {
+ return prefix, nil
+ }
+ log.Debugf("failed to get machine-id with usual files: %s", err)
+
+ bId, err := uuid.NewRandom()
+ if err == nil {
+ return bId.String(), nil
+ }
+ return "", errors.Wrap(err, "generating machine id")
+}
+
+// Generate a unique identifier, composed of a prefix and a random suffix.
+// The prefix can be provided by a parameter to use in test environments. 
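+// The result is the prefix with dashes removed, truncated to 32 characters,
+// followed by a 16-character random suffix.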
+func generateID(prefix string) (string, error) {
+ var err error
+ if prefix == "" {
+ prefix, err = generateIDPrefix()
+ }
+ if err != nil {
+ return "", err
+ }
+ prefix = strings.ReplaceAll(prefix, "-", "")
+ if len(prefix) > 32 {
+ prefix = prefix[:32]
+ }
+ suffix := generatePassword(16)
+ return prefix + suffix, nil
+}
+
+func displayLastHeartBeat(m *ent.Machine, fancy bool) string {
+ var hbDisplay string
+
+ if m.LastHeartbeat != nil {
+ lastHeartBeat := time.Now().UTC().Sub(*m.LastHeartbeat)
+ hbDisplay = lastHeartBeat.Truncate(time.Second).String()
+ if fancy && lastHeartBeat > 2*time.Minute {
+ hbDisplay = fmt.Sprintf("%s %s", emoji.Warning.String(), lastHeartBeat.Truncate(time.Second).String())
+ }
+ } else {
+ hbDisplay = "-"
+ if fancy {
+ hbDisplay = emoji.Warning.String() + " -"
+ }
+ }
+ return hbDisplay
+}
+
+func getAgents(out io.Writer, dbClient *database.Client) error {
+ machines, err := dbClient.ListMachines()
+ if err != nil {
+ return fmt.Errorf("unable to list machines: %s", err)
+ }
+ if csConfig.Cscli.Output == "human" {
+ getAgentsTable(out, machines)
+ } else if csConfig.Cscli.Output == "json" {
+ enc := json.NewEncoder(out)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(machines); err != nil {
+ log.Fatalf("failed to marshal machines: %s", err)
+ }
+ return nil
+ } else if csConfig.Cscli.Output == "raw" {
+ csvwriter := csv.NewWriter(out)
+ err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"})
+ if err != nil {
+ log.Fatalf("failed to write header: %s", err)
+ }
+ for _, m := range machines {
+ var validated string
+ if m.IsValidated {
+ validated = "true"
+ } else {
+ validated = "false"
+ }
+ err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, displayLastHeartBeat(m, false)})
+ if err != nil {
+ log.Fatalf("failed to write raw output: %s", err)
+ }
+ }
+ csvwriter.Flush()
+ } else {
+ log.Errorf("unknown output '%s'", csConfig.Cscli.Output)
+ }
+ return nil
+}
+
+func NewMachinesCmd() *cobra.Command {
+ /* ---- MACHINES COMMAND */
+ var cmdMachines = &cobra.Command{
+ Use: "machines [action]",
+ Short: "Manage local API machines [requires local API]",
+ Long: `To list/add/delete/validate machines.
+Note: This command requires direct database access, so it is intended to be run on the local API machine. 
+`, + Example: `cscli machines [action]`, + DisableAutoGenTag: true, + Aliases: []string{"machine"}, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + if err := csConfig.LoadAPIServer(); err != nil || csConfig.DisableAPI { + if err != nil { + log.Errorf("local api : %s", err) + } + log.Fatal("Local API is disabled, please run this command on the local API machine") + } + if err := csConfig.LoadDBConfig(); err != nil { + log.Errorf("This command requires direct database access (must be run on the local API machine)") + log.Fatalf(err.Error()) + } + }, + } + + var cmdMachinesList = &cobra.Command{ + Use: "list", + Short: "List machines", + Long: `List `, + Example: `cscli machines list`, + Args: cobra.MaximumNArgs(1), + DisableAutoGenTag: true, + PreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + err := getAgents(color.Output, dbClient) + if err != nil { + log.Fatalf("unable to list machines: %s", err) + } + }, + } + cmdMachines.AddCommand(cmdMachinesList) + + var cmdMachinesAdd = &cobra.Command{ + Use: "add", + Short: "add machine to the database.", + DisableAutoGenTag: true, + Long: `Register a new machine in the database. cscli should be on the same machine as LAPI.`, + Example: ` +cscli machines add --auto +cscli machines add MyTestMachine --auto +cscli machines add MyTestMachine --password MyPassword +`, + PreRun: func(cmd *cobra.Command, args []string) { + var err error + dbClient, err = database.NewClient(csConfig.DbConfig) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + }, + Run: func(cmd *cobra.Command, args []string) { + var dumpFile string + var err error + + // create machineID if not specified by user + if len(args) == 0 { + if !autoAdd { + printHelp(cmd) + return + } + machineID, err = generateID("") + if err != nil { + log.Fatalf("unable to generate machine id : %s", err) + } + } else { + machineID = args[0] + } + + /*check if file already exists*/ + if outputFile != "" { + dumpFile = outputFile + } else if csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { + dumpFile = csConfig.API.Client.CredentialsFilePath + } + + // create a password if it's not specified by user + if machinePassword == "" && !interactive { + if !autoAdd { + printHelp(cmd) + return + } + machinePassword = generatePassword(passwordLength) + } else if machinePassword == "" && interactive { + qs := &survey.Password{ + Message: "Please provide a password for the machine", + } + survey.AskOne(qs, &machinePassword) + } + password := strfmt.Password(machinePassword) + _, err = dbClient.CreateMachine(&machineID, &password, "", true, forceAdd, types.PasswordAuthType) + if err != nil { + log.Fatalf("unable to create machine: %s", err) + } + log.Infof("Machine '%s' successfully added to the local API", machineID) + + if apiURL == "" { + if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" { + apiURL = csConfig.API.Client.Credentials.URL + } else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" { + apiURL = "http://" + csConfig.API.Server.ListenURI + } else { + log.Fatalf("unable to dump an api URL. 
Please provide it in your configuration or with the -u parameter")
+				}
+			}
+			apiCfg := csconfig.ApiCredentialsCfg{
+				Login:    machineID,
+				Password: password.String(),
+				URL:      apiURL,
+			}
+			apiConfigDump, err := yaml.Marshal(apiCfg)
+			if err != nil {
+				log.Fatalf("unable to marshal api credentials: %s", err)
+			}
+			if dumpFile != "" && dumpFile != "-" {
+				err = os.WriteFile(dumpFile, apiConfigDump, 0644)
+				if err != nil {
+					log.Fatalf("write api credentials in '%s' failed: %s", dumpFile, err)
+				}
+				log.Printf("API credentials dumped to '%s'", dumpFile)
+			} else {
+				fmt.Printf("%s\n", string(apiConfigDump))
+			}
+		},
+	}
+	cmdMachinesAdd.Flags().StringVarP(&machinePassword, "password", "p", "", "machine password to login to the API")
+	cmdMachinesAdd.Flags().StringVarP(&outputFile, "file", "f", "",
+		"output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")")
+	cmdMachinesAdd.Flags().StringVarP(&apiURL, "url", "u", "", "URL of the local API")
+	cmdMachinesAdd.Flags().BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password")
+	cmdMachinesAdd.Flags().BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)")
+	cmdMachinesAdd.Flags().BoolVar(&forceAdd, "force", false, "will force add the machine if it already exists")
+	cmdMachines.AddCommand(cmdMachinesAdd)
+
+	var cmdMachinesDelete = &cobra.Command{
+		Use:               "delete [machine_name]...",
+		Short:             "delete machines",
+		Example:           `cscli machines delete "machine_name"`,
+		Args:              cobra.MinimumNArgs(1),
+		Aliases:           []string{"remove"},
+		DisableAutoGenTag: true,
+		PreRun: func(cmd *cobra.Command, args []string) {
+			var err error
+			dbClient, err = database.NewClient(csConfig.DbConfig)
+			if err != nil {
+				log.Fatalf("unable to create new database client: %s", err)
+			}
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			for _, machineID := range args {
+				err := dbClient.DeleteWatcher(machineID)
+				if err != nil {
+					log.Errorf("unable to delete machine '%s': %s", machineID, err)
+					return
+				}
+				log.Infof("machine '%s' deleted successfully", machineID)
+			}
+		},
+	}
+	cmdMachinesDelete.Flags().StringVarP(&machineID, "machine", "m", "", "machine to delete")
+	cmdMachines.AddCommand(cmdMachinesDelete)
+
+	var cmdMachinesValidate = &cobra.Command{
+		Use:               "validate",
+		Short:             "validate a machine to access the local API",
+		Long:              `validate a machine to access the local API.`,
+		Example:           `cscli machines validate "machine_name"`,
+		Args:              cobra.ExactArgs(1),
+		DisableAutoGenTag: true,
+		PreRun: func(cmd *cobra.Command, args []string) {
+			var err error
+			dbClient, err = database.NewClient(csConfig.DbConfig)
+			if err != nil {
+				log.Fatalf("unable to create new database client: %s", err)
+			}
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			machineID = args[0]
+			if err := dbClient.ValidateMachine(machineID); err != nil {
+				log.Fatalf("unable to validate machine '%s': %s", machineID, err)
+			}
+			log.Infof("machine '%s' validated successfully", machineID)
+		},
+	}
+	cmdMachines.AddCommand(cmdMachinesValidate)
+
+	return cmdMachines
+}
diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go
new file mode 100644
index 0000000..cc15bb5
--- /dev/null
+++ b/cmd/crowdsec-cli/machines_table.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+	"io"
+	"time"
+
+	"github.com/aquasecurity/table"
+	"github.com/enescakir/emoji"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+)
+
+func getAgentsTable(out
io.Writer, machines []*ent.Machine) { + t := newLightTable(out) + t.SetHeaders("Name", "IP Address", "Last Update", "Status", "Version", "Auth Type", "Last Heartbeat") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + for _, m := range machines { + var validated string + if m.IsValidated { + validated = emoji.CheckMark.String() + } else { + validated = emoji.Prohibited.String() + } + + t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, displayLastHeartBeat(m, true)) + } + + t.Render() +} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go new file mode 100644 index 0000000..0b2b865 --- /dev/null +++ b/cmd/crowdsec-cli/main.go @@ -0,0 +1,242 @@ +package main + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/confluentinc/bincover" + "github.com/fatih/color" + cc "github.com/ivanpirog/coloredcobra" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" +) + +var bincoverTesting = "" + +var trace_lvl, dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool + +var ConfigFilePath string +var csConfig *csconfig.Config +var dbClient *database.Client + +var OutputFormat string +var OutputColor string + +var downloadOnly bool +var forceAction bool +var purge bool +var all bool +var restoreOldBackup bool + +var prometheusURL string + +func initConfig() { + var err error + if trace_lvl { + log.SetLevel(log.TraceLevel) + } else if dbg_lvl { + log.SetLevel(log.DebugLevel) + } else if nfo_lvl { + log.SetLevel(log.InfoLevel) + } else if wrn_lvl { + log.SetLevel(log.WarnLevel) + } else if err_lvl { + log.SetLevel(log.ErrorLevel) + } + logFormatter := &log.TextFormatter{TimestampFormat: "02-01-2006 03:04:05 PM", FullTimestamp: true} + log.SetFormatter(logFormatter) + + if !inSlice(os.Args[1], NoNeedConfig) { + csConfig, err = csconfig.NewConfig(ConfigFilePath, false, false) + if err != nil { + log.Fatalf(err.Error()) + } + log.Debugf("Using %s as configuration file", ConfigFilePath) + if err := csConfig.LoadCSCLI(); err != nil { + log.Fatalf(err.Error()) + } + } else { + csConfig = csconfig.NewDefaultConfig() + } + + if csConfig.Cscli == nil { + log.Fatalf("missing 'cscli' configuration in '%s', exiting", ConfigFilePath) + } + + if cwhub.HubBranch == "" && csConfig.Cscli.HubBranch != "" { + cwhub.HubBranch = csConfig.Cscli.HubBranch + } + if OutputFormat != "" { + csConfig.Cscli.Output = OutputFormat + if OutputFormat != "json" && OutputFormat != "raw" && OutputFormat != "human" { + log.Fatalf("output format %s unknown", OutputFormat) + } + } + if csConfig.Cscli.Output == "" { + csConfig.Cscli.Output = "human" + } + if csConfig.Cscli.Output == "json" { + log.SetFormatter(&log.JSONFormatter{}) + log.SetLevel(log.ErrorLevel) + } else if csConfig.Cscli.Output == "raw" { + log.SetLevel(log.ErrorLevel) + } + + if OutputColor != "" { + csConfig.Cscli.Color = OutputColor + if OutputColor != "yes" && OutputColor != "no" && OutputColor != "auto" { + log.Fatalf("output color %s unknown", OutputColor) + } + } +} + +var validArgs = []string{ + "scenarios", "parsers", 
"collections", "capi", "lapi", "postoverflows", "machines", + "metrics", "bouncers", "alerts", "decisions", "simulation", "hub", "dashboard", + "config", "completion", "version", "console", "notifications", "support", +} + +func prepender(filename string) string { + const header = `--- +id: %s +title: %s +--- +` + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " ")) +} + +func linkHandler(name string) string { + return fmt.Sprintf("/cscli/%s", name) +} + +var ( + NoNeedConfig = []string{ + "help", + "completion", + "version", + "hubtest", + } +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "cscli", + Short: "cscli allows you to manage crowdsec", + Long: `cscli is the main command to interact with your crowdsec service, scenarios & db. +It is meant to allow you to manage bans, parsers/scenarios/etc, api and generally manage you crowdsec setup.`, + ValidArgs: validArgs, + DisableAutoGenTag: true, + SilenceErrors: true, + SilenceUsage: true, + /*TBD examples*/ + } + + cc.Init(&cc.Config{ + RootCmd: rootCmd, + Headings: cc.Yellow, + Commands: cc.Green + cc.Bold, + CmdShortDescr: cc.Cyan, + Example: cc.Italic, + ExecName: cc.Bold, + Aliases: cc.Bold + cc.Italic, + FlagsDataType: cc.White, + Flags: cc.Green, + FlagsDescr: cc.Cyan, + }) + rootCmd.SetOut(color.Output) + + var cmdDocGen = &cobra.Command{ + Use: "doc", + Short: "Generate the documentation in `./doc/`. Directory must exist.", + Args: cobra.ExactArgs(0), + Hidden: true, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := doc.GenMarkdownTreeCustom(rootCmd, "./doc/", prepender, linkHandler); err != nil { + log.Fatalf("Failed to generate cobra doc: %s", err) + } + }, + } + rootCmd.AddCommand(cmdDocGen) + /*usage*/ + var cmdVersion = &cobra.Command{ + Use: "version", + Short: "Display version and exit.", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + cwversion.Show() + }, + } + rootCmd.AddCommand(cmdVersion) + + rootCmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") + rootCmd.PersistentFlags().StringVarP(&OutputFormat, "output", "o", "", "Output format: human, json, raw.") + rootCmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto.") + rootCmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug.") + rootCmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info.") + rootCmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning.") + rootCmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error.") + rootCmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace.") + + rootCmd.PersistentFlags().StringVar(&cwhub.HubBranch, "branch", "", "Override hub branch on github") + if err := rootCmd.PersistentFlags().MarkHidden("branch"); err != nil { + log.Fatalf("failed to hide flag: %s", err) + } + + if len(os.Args) > 1 { + cobra.OnInitialize(initConfig) + } + + /*don't sort flags so we can enforce order*/ + rootCmd.Flags().SortFlags = false + rootCmd.PersistentFlags().SortFlags = false + + rootCmd.AddCommand(NewConfigCmd()) + rootCmd.AddCommand(NewHubCmd()) + rootCmd.AddCommand(NewMetricsCmd()) + rootCmd.AddCommand(NewDashboardCmd()) + rootCmd.AddCommand(NewDecisionsCmd()) + rootCmd.AddCommand(NewAlertsCmd()) + 
// rootCmd.AddCommand(NewInspectCmd()) + rootCmd.AddCommand(NewSimulationCmds()) + rootCmd.AddCommand(NewBouncersCmd()) + rootCmd.AddCommand(NewMachinesCmd()) + rootCmd.AddCommand(NewParsersCmd()) + rootCmd.AddCommand(NewScenariosCmd()) + rootCmd.AddCommand(NewCollectionsCmd()) + rootCmd.AddCommand(NewPostOverflowsCmd()) + rootCmd.AddCommand(NewCapiCmd()) + rootCmd.AddCommand(NewLapiCmd()) + rootCmd.AddCommand(NewCompletionCmd()) + rootCmd.AddCommand(NewConsoleCmd()) + rootCmd.AddCommand(NewExplainCmd()) + rootCmd.AddCommand(NewHubTestCmd()) + rootCmd.AddCommand(NewNotificationsCmd()) + rootCmd.AddCommand(NewSupportCmd()) + + if err := rootCmd.Execute(); err != nil { + if bincoverTesting != "" { + log.Debug("coverage report is enabled") + } + + exitCode := 1 + log.NewEntry(log.StandardLogger()).Log(log.FatalLevel, err) + if bincoverTesting == "" { + os.Exit(exitCode) + } + bincover.ExitCode = exitCode + } +} diff --git a/cmd/crowdsec-cli/main_test.go b/cmd/crowdsec-cli/main_test.go new file mode 100644 index 0000000..809ae0b --- /dev/null +++ b/cmd/crowdsec-cli/main_test.go @@ -0,0 +1,13 @@ +//go:build testrunmain + +package main + +import ( + "testing" + + "github.com/confluentinc/bincover" +) + +func TestBincoverRunMain(t *testing.T) { + bincover.RunTest(main) +} diff --git a/cmd/crowdsec-cli/messages.go b/cmd/crowdsec-cli/messages.go new file mode 100644 index 0000000..02f0516 --- /dev/null +++ b/cmd/crowdsec-cli/messages.go @@ -0,0 +1,23 @@ +package main + +import ( + "fmt" + "runtime" +) + +// ReloadMessage returns a description of the task required to reload +// the crowdsec configuration, according to the operating system. +func ReloadMessage() string { + var msg string + + switch runtime.GOOS { + case "windows": + msg = "Please restart the crowdsec service" + case "freebsd": + msg = `Run 'sudo service crowdsec reload'` + default: + msg = `Run 'sudo systemctl reload crowdsec'` + } + + return fmt.Sprintf("%s for the new configuration to be effective.", msg) +} diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go new file mode 100644 index 0000000..7620a8a --- /dev/null +++ b/cmd/crowdsec-cli/metrics.go @@ -0,0 +1,289 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// FormatPrometheusMetrics is a complete rip from prom2json +func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error { + mfChan := make(chan *dto.MetricFamily, 1024) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. 
+ transport.ResponseHeaderTimeout = time.Minute + go func() { + defer types.CatchPanic("crowdsec/ShowPrometheus") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + log.Fatalf("failed to fetch prometheus metrics : %v", err) + } + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) + /*walk*/ + lapi_decisions_stats := map[string]struct { + NonEmpty int + Empty int + }{} + acquis_stats := map[string]map[string]int{} + parsers_stats := map[string]map[string]int{} + buckets_stats := map[string]map[string]int{} + lapi_stats := map[string]map[string]int{} + lapi_machine_stats := map[string]map[string]map[string]int{} + lapi_bouncer_stats := map[string]map[string]map[string]int{} + decisions_stats := map[string]map[string]map[string]int{} + alerts_stats := map[string]int{} + + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { + metric, ok := m.(prom2json.Metric) + if !ok { + log.Debugf("failed to convert metric to prom2json.Metric") + continue + } + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) + } else { + if srctype, ok := metric.Labels["type"]; ok { + source = srctype + ":" + source + } + } + + value := m.(prom2json.Metric).Value + machine := metric.Labels["machine"] + bouncer := metric.Labels["bouncer"] + + route := metric.Labels["route"] + method := metric.Labels["method"] + + reason := metric.Labels["reason"] + origin := metric.Labels["origin"] + action := metric.Labels["action"] + + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + } + ival := int(fval) + switch fam.Name { + /*buckets*/ + case "cs_bucket_created_total": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["instantiation"] += ival + case "cs_buckets": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["curr_count"] += ival + case "cs_bucket_overflowed_total": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["overflow"] += ival + case "cs_bucket_poured_total": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + buckets_stats[name]["pour"] += ival + acquis_stats[source]["pour"] += ival + case "cs_bucket_underflowed_total": + if _, ok := buckets_stats[name]; !ok { + buckets_stats[name] = make(map[string]int) + } + buckets_stats[name]["underflow"] += ival + /*acquis*/ + case "cs_parser_hits_total": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["reads"] += ival + case "cs_parser_hits_ok_total": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["parsed"] += ival + case "cs_parser_hits_ko_total": + if _, ok := acquis_stats[source]; !ok { + acquis_stats[source] = make(map[string]int) + } + acquis_stats[source]["unparsed"] += ival + case "cs_node_hits_total": + if _, ok := 
parsers_stats[name]; !ok { + parsers_stats[name] = make(map[string]int) + } + parsers_stats[name]["hits"] += ival + case "cs_node_hits_ok_total": + if _, ok := parsers_stats[name]; !ok { + parsers_stats[name] = make(map[string]int) + } + parsers_stats[name]["parsed"] += ival + case "cs_node_hits_ko_total": + if _, ok := parsers_stats[name]; !ok { + parsers_stats[name] = make(map[string]int) + } + parsers_stats[name]["unparsed"] += ival + case "cs_lapi_route_requests_total": + if _, ok := lapi_stats[route]; !ok { + lapi_stats[route] = make(map[string]int) + } + lapi_stats[route][method] += ival + case "cs_lapi_machine_requests_total": + if _, ok := lapi_machine_stats[machine]; !ok { + lapi_machine_stats[machine] = make(map[string]map[string]int) + } + if _, ok := lapi_machine_stats[machine][route]; !ok { + lapi_machine_stats[machine][route] = make(map[string]int) + } + lapi_machine_stats[machine][route][method] += ival + case "cs_lapi_bouncer_requests_total": + if _, ok := lapi_bouncer_stats[bouncer]; !ok { + lapi_bouncer_stats[bouncer] = make(map[string]map[string]int) + } + if _, ok := lapi_bouncer_stats[bouncer][route]; !ok { + lapi_bouncer_stats[bouncer][route] = make(map[string]int) + } + lapi_bouncer_stats[bouncer][route][method] += ival + case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": + if _, ok := lapi_decisions_stats[bouncer]; !ok { + lapi_decisions_stats[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + x := lapi_decisions_stats[bouncer] + if fam.Name == "cs_lapi_decisions_ko_total" { + x.Empty += ival + } else if fam.Name == "cs_lapi_decisions_ok_total" { + x.NonEmpty += ival + } + lapi_decisions_stats[bouncer] = x + case "cs_active_decisions": + if _, ok := decisions_stats[reason]; !ok { + decisions_stats[reason] = make(map[string]map[string]int) + } + if _, ok := decisions_stats[reason][origin]; !ok { + decisions_stats[reason][origin] = make(map[string]int) + } + decisions_stats[reason][origin][action] += ival + case "cs_alerts": + /*if _, ok := alerts_stats[scenario]; !ok { + alerts_stats[scenario] = make(map[string]int) + }*/ + alerts_stats[reason] += ival + default: + continue + } + + } + } + + if formatType == "human" { + acquisStatsTable(out, acquis_stats) + bucketStatsTable(out, buckets_stats) + parserStatsTable(out, parsers_stats) + lapiStatsTable(out, lapi_stats) + lapiMachineStatsTable(out, lapi_machine_stats) + lapiBouncerStatsTable(out, lapi_bouncer_stats) + lapiDecisionStatsTable(out, lapi_decisions_stats) + decisionStatsTable(out, decisions_stats) + alertStatsTable(out, alerts_stats) + } else if formatType == "json" { + for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} { + x, err := json.MarshalIndent(val, "", " ") + if err != nil { + return fmt.Errorf("failed to unmarshal metrics : %v", err) + } + out.Write(x) + } + return nil + + } else if formatType == "raw" { + for _, val := range []interface{}{acquis_stats, parsers_stats, buckets_stats, lapi_stats, lapi_bouncer_stats, lapi_machine_stats, lapi_decisions_stats, decisions_stats, alerts_stats} { + x, err := yaml.Marshal(val) + if err != nil { + return fmt.Errorf("failed to unmarshal metrics : %v", err) + } + out.Write(x) + } + return nil + } + return nil +} + +var noUnit bool + +func NewMetricsCmd() *cobra.Command { + /* ---- UPDATE COMMAND */ + var cmdMetrics = &cobra.Command{ + Use: "metrics", + Short: "Display crowdsec prometheus metrics.", + Long: 
`Fetch metrics from the prometheus server and display them in a human-friendly way`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := csConfig.LoadPrometheus(); err != nil { + log.Fatalf(err.Error()) + } + if !csConfig.Prometheus.Enabled { + log.Warning("Prometheus is not enabled, can't show metrics") + os.Exit(1) + } + + if prometheusURL == "" { + prometheusURL = csConfig.Cscli.PrometheusUrl + } + + if prometheusURL == "" { + log.Errorf("No prometheus url, please specify in %s or via -u", *csConfig.FilePath) + os.Exit(1) + } + + err := FormatPrometheusMetrics(color.Output, prometheusURL+"/metrics", csConfig.Cscli.Output) + if err != nil { + log.Fatalf("could not fetch prometheus metrics: %s", err) + } + }, + } + cmdMetrics.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "", "Prometheus url (http://:/metrics)") + cmdMetrics.PersistentFlags().BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + + return cmdMetrics +} diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go new file mode 100644 index 0000000..f55d89c --- /dev/null +++ b/cmd/crowdsec-cli/metrics_table.go @@ -0,0 +1,272 @@ +package main + +import ( + "fmt" + "io" + "sort" + + "github.com/aquasecurity/table" + log "github.com/sirupsen/logrus" +) + +func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { + // stats: machine -> route -> method -> count + + // sort keys to keep consistent order when printing + machineKeys := []string{} + for k := range stats { + machineKeys = append(machineKeys, k) + } + sort.Strings(machineKeys) + + numRows := 0 + for _, machine := range machineKeys { + // oneRow: route -> method -> count + machineRow := stats[machine] + for routeName, route := range machineRow { + for methodName, count := range route { + row := []string{ + machine, + routeName, + methodName, + } + if count != 0 { + row = append(row, fmt.Sprintf("%d", count)) + } else { + row = append(row, "-") + } + t.AddRow(row...) + numRows++ + } + } + } + return numRows +} + +func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string) (int, error) { + if t == nil { + return 0, fmt.Errorf("nil table") + } + // sort keys to keep consistent order when printing + sortedKeys := []string{} + for k := range stats { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + numRows := 0 + for _, alabel := range sortedKeys { + astats, ok := stats[alabel] + if !ok { + continue + } + row := []string{ + alabel, + } + for _, sl := range keys { + if v, ok := astats[sl]; ok && v != 0 { + numberToShow := fmt.Sprintf("%d", v) + if !noUnit { + numberToShow = formatNumber(v) + } + + row = append(row, numberToShow) + } else { + row = append(row, "-") + } + } + t.AddRow(row...) 
+ numRows++ + } + return numRows, nil +} + +func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} + + if numRows, err := metricsToTable(t, stats, keys); err != nil { + log.Warningf("while collecting acquis stats: %s", err) + } else if numRows > 0 { + renderTableTitle(out, "\nBucket Metrics:") + t.Render() + } +} + +func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + keys := []string{"reads", "parsed", "unparsed", "pour"} + + if numRows, err := metricsToTable(t, stats, keys); err != nil { + log.Warningf("while collecting acquis stats: %s", err) + } else if numRows > 0 { + renderTableTitle(out, "\nAcquisition Metrics:") + t.Render() + } +} + +func parserStatsTable(out io.Writer, stats map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + keys := []string{"hits", "parsed", "unparsed"} + + if numRows, err := metricsToTable(t, stats, keys); err != nil { + log.Warningf("while collecting acquis stats: %s", err) + } else if numRows > 0 { + renderTableTitle(out, "\nParser Metrics:") + t.Render() + } +} + +func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Route", "Method", "Hits") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + // unfortunately, we can't reuse metricsToTable as the structure is too different :/ + sortedKeys := []string{} + for k := range stats { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + numRows := 0 + for _, alabel := range sortedKeys { + astats := stats[alabel] + + subKeys := []string{} + for skey := range astats { + subKeys = append(subKeys, skey) + } + sort.Strings(subKeys) + + for _, sl := range subKeys { + row := []string{ + alabel, + sl, + fmt.Sprintf("%d", astats[sl]), + } + t.AddRow(row...) 
+ numRows++ + } + } + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Metrics:") + t.Render() + } +} + +func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Machine", "Route", "Method", "Hits") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + numRows := lapiMetricsToTable(t, stats) + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Machines Metrics:") + t.Render() + } +} + +func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Route", "Method", "Hits") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + numRows := lapiMetricsToTable(t, stats) + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Bouncers Metrics:") + t.Render() + } +} + +func lapiDecisionStatsTable(out io.Writer, stats map[string]struct { + NonEmpty int + Empty int +}, +) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + numRows := 0 + for bouncer, hits := range stats { + t.AddRow( + bouncer, + fmt.Sprintf("%d", hits.Empty), + fmt.Sprintf("%d", hits.NonEmpty), + ) + numRows++ + } + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Bouncers Decisions:") + t.Render() + } +} + +func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Reason", "Origin", "Action", "Count") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + numRows := 0 + for reason, origins := range stats { + for origin, actions := range origins { + for action, hits := range actions { + t.AddRow( + reason, + origin, + action, + fmt.Sprintf("%d", hits), + ) + numRows++ + } + } + } + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Decisions:") + t.Render() + } +} + +func alertStatsTable(out io.Writer, stats map[string]int) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Reason", "Count") + t.SetAlignment(table.AlignLeft, table.AlignLeft) + + numRows := 0 + for scenario, hits := range stats { + t.AddRow( + scenario, + fmt.Sprintf("%d", hits), + ) + numRows++ + } + + if numRows > 0 { + renderTableTitle(out, "\nLocal Api Alerts:") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go new file mode 100644 index 0000000..8125529 --- /dev/null +++ b/cmd/crowdsec-cli/notifications.go @@ -0,0 +1,332 @@ +package main + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "io/fs" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" +) + +type NotificationsCfg struct { + Config csplugin.PluginConfig `json:"plugin_config"` + Profiles []*csconfig.ProfileCfg `json:"associated_profiles"` + ids []uint +} + +func NewNotificationsCmd() *cobra.Command { + var cmdNotifications = &cobra.Command{ + Use: 
"notifications [action]", + Short: "Helper for notification plugin configuration", + Long: "To list/inspect/test notification template", + Args: cobra.MinimumNArgs(1), + Aliases: []string{"notifications", "notification"}, + DisableAutoGenTag: true, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var ( + err error + ) + if err = csConfig.API.Server.LoadProfiles(); err != nil { + log.Fatalf(err.Error()) + } + if csConfig.ConfigPaths.NotificationDir == "" { + log.Fatalf("config_paths.notification_dir is not set in crowdsec config") + } + }, + } + + var cmdNotificationsList = &cobra.Command{ + Use: "list", + Short: "List active notifications plugins", + Long: `List active notifications plugins`, + Example: `cscli notifications list`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, arg []string) error { + ncfgs, err := getNotificationsConfiguration() + if err != nil { + return errors.Wrap(err, "Can't build profiles configuration") + } + + if csConfig.Cscli.Output == "human" { + notificationListTable(color.Output, ncfgs) + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(ncfgs, "", " ") + if err != nil { + return errors.New("failed to marshal notification configuration") + } + fmt.Printf("%s", string(x)) + } else if csConfig.Cscli.Output == "raw" { + csvwriter := csv.NewWriter(os.Stdout) + err := csvwriter.Write([]string{"Name", "Type", "Profile name"}) + if err != nil { + return errors.Wrap(err, "failed to write raw header") + } + for _, b := range ncfgs { + profilesList := []string{} + for _, p := range b.Profiles { + profilesList = append(profilesList, p.Name) + } + err := csvwriter.Write([]string{b.Config.Name, b.Config.Type, strings.Join(profilesList, ", ")}) + if err != nil { + return errors.Wrap(err, "failed to write raw content") + } + } + csvwriter.Flush() + } + return nil + }, + } + cmdNotifications.AddCommand(cmdNotificationsList) + + var cmdNotificationsInspect = &cobra.Command{ + Use: "inspect", + Short: "Inspect active notifications plugin configuration", + Long: `Inspect active notifications plugin and show configuration`, + Example: `cscli notifications inspect `, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, arg []string) error { + var ( + cfg NotificationsCfg + ok bool + ) + + pluginName := arg[0] + + if pluginName == "" { + errors.New("Please provide a plugin name to inspect") + } + ncfgs, err := getNotificationsConfiguration() + if err != nil { + return errors.Wrap(err, "Can't build profiles configuration") + } + if cfg, ok = ncfgs[pluginName]; !ok { + return errors.New("The provided plugin name doesn't exist or isn't active") + } + + if csConfig.Cscli.Output == "human" || csConfig.Cscli.Output == "raw" { + fmt.Printf(" - %15s: %15s\n", "Type", cfg.Config.Type) + fmt.Printf(" - %15s: %15s\n", "Name", cfg.Config.Name) + fmt.Printf(" - %15s: %15s\n", "Timeout", cfg.Config.TimeOut) + fmt.Printf(" - %15s: %15s\n", "Format", cfg.Config.Format) + for k, v := range cfg.Config.Config { + fmt.Printf(" - %15s: %15v\n", k, v) + } + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(cfg, "", " ") + if err != nil { + return errors.New("failed to marshal notification configuration") + } + fmt.Printf("%s", string(x)) + } + return nil + }, + } + cmdNotifications.AddCommand(cmdNotificationsInspect) + var remediation bool + var alertOverride string + var cmdNotificationsReinject = &cobra.Command{ + Use: "reinject", + Short: "reinject alert into 
notifications system", + Long: `Reinject alert into notifications system`, + Example: ` +cscli notifications reinject +cscli notifications reinject --remediation +cscli notifications reinject -a '{"remediation": true,"scenario":"notification/test"}' +`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + var ( + pluginBroker csplugin.PluginBroker + pluginTomb tomb.Tomb + ) + if len(args) != 1 { + printHelp(cmd) + return errors.New("Wrong number of argument: there should be one argument") + } + + //first: get the alert + id, err := strconv.Atoi(args[0]) + if err != nil { + return errors.New(fmt.Sprintf("bad alert id %s", args[0])) + } + if err := csConfig.LoadAPIClient(); err != nil { + return errors.Wrapf(err, "loading api client") + } + if csConfig.API.Client == nil { + return errors.New("There is no configuration on 'api_client:'") + } + if csConfig.API.Client.Credentials == nil { + return errors.New(fmt.Sprintf("Please provide credentials for the API in '%s'", csConfig.API.Client.CredentialsFilePath)) + } + apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + if err != nil { + return errors.Wrapf(err, "error parsing the URL of the API") + } + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: csConfig.API.Client.Credentials.Login, + Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + return errors.Wrapf(err, "error creating the client for the API") + } + alert, _, err := client.Alerts.GetByID(context.Background(), id) + if err != nil { + return errors.Wrapf(err, fmt.Sprintf("can't find alert with id %s", args[0])) + } + + if alertOverride != "" { + if err = json.Unmarshal([]byte(alertOverride), alert); err != nil { + return errors.Wrapf(err, "Can't unmarshal the data given in the alert flag") + } + } + if !remediation { + alert.Remediation = true + } + + // second we start plugins + err = pluginBroker.Init(csConfig.PluginConfig, csConfig.API.Server.Profiles, csConfig.ConfigPaths) + if err != nil { + return errors.Wrapf(err, "Can't initialize plugins") + } + + pluginTomb.Go(func() error { + pluginBroker.Run(&pluginTomb) + return nil + }) + + //third: get the profile(s), and process the whole stuff + + profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + if err != nil { + return errors.Wrap(err, "Cannot extract profiles from configuration") + } + + for id, profile := range profiles { + _, matched, err := profile.EvaluateProfile(alert) + if err != nil { + return errors.Wrapf(err, "can't evaluate profile %s", profile.Cfg.Name) + } + if !matched { + log.Infof("The profile %s didn't match", profile.Cfg.Name) + continue + } + log.Infof("The profile %s matched, sending to its configured notification plugins", profile.Cfg.Name) + loop: + for { + select { + case pluginBroker.PluginChannel <- csplugin.ProfileAlert{ + ProfileID: uint(id), + Alert: alert, + }: + break loop + default: + time.Sleep(50 * time.Millisecond) + log.Info("sleeping\n") + + } + } + if profile.Cfg.OnSuccess == "break" { + log.Infof("The profile %s contains a 'on_success: break' so bailing out", profile.Cfg.Name) + break + } + } + + // time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent + pluginTomb.Kill(errors.New("terminating")) + pluginTomb.Wait() + return nil + }, + } + cmdNotificationsReinject.Flags().BoolVarP(&remediation, "remediation", 
"r", false, "Set Alert.Remediation to false in the reinjected alert (see your profile filter configuration)") + cmdNotificationsReinject.Flags().StringVarP(&alertOverride, "alert", "a", "", "JSON string used to override alert fields in the reinjected alert (see crowdsec/pkg/models/alert.go in the source tree for the full definition of the object)") + cmdNotifications.AddCommand(cmdNotificationsReinject) + return cmdNotifications +} + +func getNotificationsConfiguration() (map[string]NotificationsCfg, error) { + pcfgs := map[string]csplugin.PluginConfig{} + wf := func(path string, info fs.FileInfo, err error) error { + if info == nil { + return errors.Wrapf(err, "error while traversing directory %s", path) + } + name := filepath.Join(csConfig.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice + if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) { + ts, err := csplugin.ParsePluginConfigFile(name) + if err != nil { + return errors.Wrapf(err, "Loading notifification plugin configuration with %s", name) + } + for _, t := range ts { + pcfgs[t.Name] = t + } + } + return nil + } + + if err := filepath.Walk(csConfig.ConfigPaths.NotificationDir, wf); err != nil { + return nil, errors.Wrap(err, "Loading notifification plugin configuration") + } + + // A bit of a tricky stuf now: reconcile profiles and notification plugins + ncfgs := map[string]NotificationsCfg{} + profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + if err != nil { + return nil, errors.Wrap(err, "Cannot extract profiles from configuration") + } + for profileID, profile := range profiles { + loop: + for _, notif := range profile.Cfg.Notifications { + for name, pc := range pcfgs { + if notif == name { + if _, ok := ncfgs[pc.Name]; !ok { + ncfgs[pc.Name] = NotificationsCfg{ + Config: pc, + Profiles: []*csconfig.ProfileCfg{profile.Cfg}, + ids: []uint{uint(profileID)}, + } + continue loop + } + tmp := ncfgs[pc.Name] + for _, pr := range tmp.Profiles { + var profiles []*csconfig.ProfileCfg + if pr.Name == profile.Cfg.Name { + continue + } + profiles = append(tmp.Profiles, profile.Cfg) + ids := append(tmp.ids, uint(profileID)) + ncfgs[pc.Name] = NotificationsCfg{ + Config: tmp.Config, + Profiles: profiles, + ids: ids, + } + } + } + } + } + } + return ncfgs, nil +} diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go new file mode 100644 index 0000000..1113bb7 --- /dev/null +++ b/cmd/crowdsec-cli/notifications_table.go @@ -0,0 +1,25 @@ +package main + +import ( + "io" + "strings" + + "github.com/aquasecurity/table" +) + +func notificationListTable(out io.Writer, ncfgs map[string]NotificationsCfg) { + t := newLightTable(out) + t.SetHeaders("Name", "Type", "Profile name") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + + for _, b := range ncfgs { + profilesList := []string{} + for _, p := range b.Profiles { + profilesList = append(profilesList, p.Name) + } + t.AddRow(b.Config.Name, b.Config.Type, strings.Join(profilesList, ", ")) + } + + t.Render() +} diff --git a/cmd/crowdsec-cli/parsers.go b/cmd/crowdsec-cli/parsers.go new file mode 100644 index 0000000..973b60e --- /dev/null +++ b/cmd/crowdsec-cli/parsers.go @@ -0,0 +1,174 @@ +package main + +import ( + "fmt" + + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewParsersCmd() 
*cobra.Command { + var cmdParsers = &cobra.Command{ + Use: "parsers [action] [config]", + Short: "Install/Remove/Upgrade/Inspect parser(s) from hub", + Example: `cscli parsers install crowdsecurity/sshd-logs +cscli parsers inspect crowdsecurity/sshd-logs +cscli parsers upgrade crowdsecurity/sshd-logs +cscli parsers list +cscli parsers remove crowdsecurity/sshd-logs +`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"parser"}, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if csConfig.Hub == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof(ReloadMessage()) + }, + } + + var ignoreError bool + var cmdParsersInstall = &cobra.Command{ + Use: "install [config]", + Short: "Install given parser(s)", + Long: `Fetch and install given parser(s) from hub`, + Example: `cscli parsers install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cwhub.PARSERS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + for _, name := range args { + t := cwhub.GetItem(cwhub.PARSERS, name) + if t == nil { + nearestItem, score := GetDistance(cwhub.PARSERS, name) + Suggest(cwhub.PARSERS, name, nearestItem.Name, score, ignoreError) + continue + } + if err := cwhub.InstallItem(csConfig, name, cwhub.PARSERS, forceAction, downloadOnly); err != nil { + if ignoreError { + log.Errorf("Error while installing '%s': %s", name, err) + } else { + log.Fatalf("Error while installing '%s': %s", name, err) + } + } + } + }, + } + cmdParsersInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdParsersInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files") + cmdParsersInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple parsers") + cmdParsers.AddCommand(cmdParsersInstall) + + var cmdParsersRemove = &cobra.Command{ + Use: "remove [config]", + Short: "Remove given parser(s)", + Long: `Remove given parse(s) from hub`, + Aliases: []string{"delete"}, + Example: `cscli parsers remove crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.RemoveMany(csConfig, cwhub.PARSERS, "", all, purge, forceAction) + return + } + + if len(args) == 0 { + log.Fatalf("Specify at least one parser to remove or '--all' flag.") + } + + for _, name := range args { + cwhub.RemoveMany(csConfig, cwhub.PARSERS, name, all, purge, forceAction) + } + }, + } + cmdParsersRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too") 
+ cmdParsersRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files") + cmdParsersRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the parsers") + cmdParsers.AddCommand(cmdParsersRemove) + + var cmdParsersUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given parser(s)", + Long: `Fetch and upgrade given parser(s) from hub`, + Example: `cscli parsers upgrade crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS, "", forceAction) + } else { + if len(args) == 0 { + log.Fatalf("no target parser to upgrade") + } + for _, name := range args { + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS, name, forceAction) + } + } + }, + } + cmdParsersUpgrade.PersistentFlags().BoolVar(&all, "all", false, "Upgrade all the parsers") + cmdParsersUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdParsers.AddCommand(cmdParsersUpgrade) + + var cmdParsersInspect = &cobra.Command{ + Use: "inspect [name]", + Short: "Inspect given parser", + Long: `Inspect given parser`, + Example: `cscli parsers inspect crowdsec/xxx`, + DisableAutoGenTag: true, + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + InspectItem(args[0], cwhub.PARSERS) + }, + } + cmdParsersInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "", "Prometheus url") + cmdParsers.AddCommand(cmdParsersInspect) + + var cmdParsersList = &cobra.Command{ + Use: "list [name]", + Short: "List all parsers or given one", + Long: `List all parsers or given one`, + Example: `cscli parsers list +cscli parser list crowdsecurity/xxx`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + ListItems(color.Output, []string{cwhub.PARSERS}, args, false, true, all) + }, + } + cmdParsersList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well") + cmdParsers.AddCommand(cmdParsersList) + + return cmdParsers +} diff --git a/cmd/crowdsec-cli/postoverflows.go b/cmd/crowdsec-cli/postoverflows.go new file mode 100644 index 0000000..f51f8a4 --- /dev/null +++ b/cmd/crowdsec-cli/postoverflows.go @@ -0,0 +1,172 @@ +package main + +import ( + "fmt" + + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewPostOverflowsCmd() *cobra.Command { + var cmdPostOverflows = &cobra.Command{ + Use: "postoverflows [action] [config]", + Short: "Install/Remove/Upgrade/Inspect postoverflow(s) from hub", + Example: `cscli postoverflows install crowdsecurity/cdn-whitelist + cscli postoverflows inspect crowdsecurity/cdn-whitelist + cscli postoverflows upgrade crowdsecurity/cdn-whitelist + cscli postoverflows list + cscli postoverflows remove crowdsecurity/cdn-whitelist`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"postoverflow"}, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadHub(); err != nil { + 
log.Fatal(err) + } + if csConfig.Hub == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof(ReloadMessage()) + }, + } + + var ignoreError bool + var cmdPostOverflowsInstall = &cobra.Command{ + Use: "install [config]", + Short: "Install given postoverflow(s)", + Long: `Fetch and install given postoverflow(s) from hub`, + Example: `cscli postoverflows install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cwhub.PARSERS_OVFLW, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + for _, name := range args { + t := cwhub.GetItem(cwhub.PARSERS_OVFLW, name) + if t == nil { + nearestItem, score := GetDistance(cwhub.PARSERS_OVFLW, name) + Suggest(cwhub.PARSERS_OVFLW, name, nearestItem.Name, score, ignoreError) + continue + } + if err := cwhub.InstallItem(csConfig, name, cwhub.PARSERS_OVFLW, forceAction, downloadOnly); err != nil { + if ignoreError { + log.Errorf("Error while installing '%s': %s", name, err) + } else { + log.Fatalf("Error while installing '%s': %s", name, err) + } + } + } + }, + } + cmdPostOverflowsInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdPostOverflowsInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files") + cmdPostOverflowsInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple postoverflows") + cmdPostOverflows.AddCommand(cmdPostOverflowsInstall) + + var cmdPostOverflowsRemove = &cobra.Command{ + Use: "remove [config]", + Short: "Remove given postoverflow(s)", + Long: `remove given postoverflow(s)`, + Example: `cscli postoverflows remove crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + Aliases: []string{"delete"}, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, "", all, purge, forceAction) + return + } + + if len(args) == 0 { + log.Fatalf("Specify at least one postoverflow to remove or '--all' flag.") + } + + for _, name := range args { + cwhub.RemoveMany(csConfig, cwhub.PARSERS_OVFLW, name, all, purge, forceAction) + } + }, + } + cmdPostOverflowsRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too") + cmdPostOverflowsRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files") + cmdPostOverflowsRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the postoverflows") + cmdPostOverflows.AddCommand(cmdPostOverflowsRemove) + + var cmdPostOverflowsUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given postoverflow(s)", + Long: `Fetch and Upgrade given 
postoverflow(s) from hub`, + Example: `cscli postoverflows upgrade crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete) + }, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, "", forceAction) + } else { + if len(args) == 0 { + log.Fatalf("no target postoverflow to upgrade") + } + for _, name := range args { + cwhub.UpgradeConfig(csConfig, cwhub.PARSERS_OVFLW, name, forceAction) + } + } + }, + } + cmdPostOverflowsUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the postoverflows") + cmdPostOverflowsUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdPostOverflows.AddCommand(cmdPostOverflowsUpgrade) + + var cmdPostOverflowsInspect = &cobra.Command{ + Use: "inspect [config]", + Short: "Inspect given postoverflow", + Long: `Inspect given postoverflow`, + Example: `cscli postoverflows inspect crowdsec/xxx crowdsec/xyz`, + DisableAutoGenTag: true, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.PARSERS_OVFLW, args, toComplete) + }, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + InspectItem(args[0], cwhub.PARSERS_OVFLW) + }, + } + cmdPostOverflows.AddCommand(cmdPostOverflowsInspect) + + var cmdPostOverflowsList = &cobra.Command{ + Use: "list [config]", + Short: "List all postoverflows or given one", + Long: `List all postoverflows or given one`, + Example: `cscli postoverflows list +cscli postoverflows list crowdsecurity/xxx`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + ListItems(color.Output, []string{cwhub.PARSERS_OVFLW}, args, false, true, all) + }, + } + cmdPostOverflowsList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well") + cmdPostOverflows.AddCommand(cmdPostOverflowsList) + + return cmdPostOverflows +} diff --git a/cmd/crowdsec-cli/scenarios.go b/cmd/crowdsec-cli/scenarios.go new file mode 100644 index 0000000..cc892b2 --- /dev/null +++ b/cmd/crowdsec-cli/scenarios.go @@ -0,0 +1,177 @@ +package main + +import ( + "fmt" + + "github.com/fatih/color" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewScenariosCmd() *cobra.Command { + + var cmdScenarios = &cobra.Command{ + Use: "scenarios [action] [config]", + Short: "Install/Remove/Upgrade/Inspect scenario(s) from hub", + Example: `cscli scenarios list [-a] +cscli scenarios install crowdsecurity/ssh-bf +cscli scenarios inspect crowdsecurity/ssh-bf +cscli scenarios upgrade crowdsecurity/ssh-bf +cscli scenarios remove crowdsecurity/ssh-bf +`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"scenario"}, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if csConfig.Hub == nil { + return fmt.Errorf("you must configure cli before interacting with hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + return errors.Wrap(err, "while setting hub branch") + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + 
log.Fatalf("Failed to get Hub index : %v", err) + } + + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() == "inspect" || cmd.Name() == "list" { + return + } + log.Infof(ReloadMessage()) + }, + } + + var ignoreError bool + var cmdScenariosInstall = &cobra.Command{ + Use: "install [config]", + Short: "Install given scenario(s)", + Long: `Fetch and install given scenario(s) from hub`, + Example: `cscli scenarios install crowdsec/xxx crowdsec/xyz`, + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cwhub.SCENARIOS, args, toComplete) + }, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + for _, name := range args { + t := cwhub.GetItem(cwhub.SCENARIOS, name) + if t == nil { + nearestItem, score := GetDistance(cwhub.SCENARIOS, name) + Suggest(cwhub.SCENARIOS, name, nearestItem.Name, score, ignoreError) + continue + } + if err := cwhub.InstallItem(csConfig, name, cwhub.SCENARIOS, forceAction, downloadOnly); err != nil { + if ignoreError { + log.Errorf("Error while installing '%s': %s", name, err) + } else { + log.Fatalf("Error while installing '%s': %s", name, err) + } + } + } + }, + } + cmdScenariosInstall.PersistentFlags().BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + cmdScenariosInstall.PersistentFlags().BoolVar(&forceAction, "force", false, "Force install : Overwrite tainted and outdated files") + cmdScenariosInstall.PersistentFlags().BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple scenarios") + cmdScenarios.AddCommand(cmdScenariosInstall) + + var cmdScenariosRemove = &cobra.Command{ + Use: "remove [config]", + Short: "Remove given scenario(s)", + Long: `remove given scenario(s)`, + Example: `cscli scenarios remove crowdsec/xxx crowdsec/xyz`, + Aliases: []string{"delete"}, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.SCENARIOS, args, toComplete) + }, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.RemoveMany(csConfig, cwhub.SCENARIOS, "", all, purge, forceAction) + return + } + + if len(args) == 0 { + log.Fatalf("Specify at least one scenario to remove or '--all' flag.") + } + + for _, name := range args { + cwhub.RemoveMany(csConfig, cwhub.SCENARIOS, name, all, purge, forceAction) + } + }, + } + cmdScenariosRemove.PersistentFlags().BoolVar(&purge, "purge", false, "Delete source file too") + cmdScenariosRemove.PersistentFlags().BoolVar(&forceAction, "force", false, "Force remove : Remove tainted and outdated files") + cmdScenariosRemove.PersistentFlags().BoolVar(&all, "all", false, "Delete all the scenarios") + cmdScenarios.AddCommand(cmdScenariosRemove) + + var cmdScenariosUpgrade = &cobra.Command{ + Use: "upgrade [config]", + Short: "Upgrade given scenario(s)", + Long: `Fetch and Upgrade given scenario(s) from hub`, + Example: `cscli scenarios upgrade crowdsec/xxx crowdsec/xyz`, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.SCENARIOS, args, toComplete) + }, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if all { + cwhub.UpgradeConfig(csConfig, cwhub.SCENARIOS, "", forceAction) + } else { + if len(args) == 0 { + log.Fatalf("no target 
scenario to upgrade") + } + for _, name := range args { + cwhub.UpgradeConfig(csConfig, cwhub.SCENARIOS, name, forceAction) + } + } + }, + } + cmdScenariosUpgrade.PersistentFlags().BoolVarP(&all, "all", "a", false, "Upgrade all the scenarios") + cmdScenariosUpgrade.PersistentFlags().BoolVar(&forceAction, "force", false, "Force upgrade : Overwrite tainted and outdated files") + cmdScenarios.AddCommand(cmdScenariosUpgrade) + + var cmdScenariosInspect = &cobra.Command{ + Use: "inspect [config]", + Short: "Inspect given scenario", + Long: `Inspect given scenario`, + Example: `cscli scenarios inspect crowdsec/xxx`, + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cwhub.SCENARIOS, args, toComplete) + }, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + InspectItem(args[0], cwhub.SCENARIOS) + }, + } + cmdScenariosInspect.PersistentFlags().StringVarP(&prometheusURL, "url", "u", "", "Prometheus url") + cmdScenarios.AddCommand(cmdScenariosInspect) + + var cmdScenariosList = &cobra.Command{ + Use: "list [config]", + Short: "List all scenario(s) or given one", + Long: `List all scenario(s) or given one`, + Example: `cscli scenarios list +cscli scenarios list crowdsecurity/xxx`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + ListItems(color.Output, []string{cwhub.SCENARIOS}, args, false, true, all) + }, + } + cmdScenariosList.PersistentFlags().BoolVarP(&all, "all", "a", false, "List disabled items as well") + cmdScenarios.AddCommand(cmdScenariosList) + + return cmdScenarios +} diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go new file mode 100644 index 0000000..e9dd06c --- /dev/null +++ b/cmd/crowdsec-cli/simulation.go @@ -0,0 +1,251 @@ +package main + +import ( + "fmt" + "os" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func addToExclusion(name string) error { + csConfig.Cscli.SimulationConfig.Exclusions = append(csConfig.Cscli.SimulationConfig.Exclusions, name) + return nil +} + +func removeFromExclusion(name string) error { + index := indexOf(name, csConfig.Cscli.SimulationConfig.Exclusions) + + // Remove element from the slice + csConfig.Cscli.SimulationConfig.Exclusions[index] = csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] + csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] = "" + csConfig.Cscli.SimulationConfig.Exclusions = csConfig.Cscli.SimulationConfig.Exclusions[:len(csConfig.Cscli.SimulationConfig.Exclusions)-1] + + return nil +} + +func enableGlobalSimulation() error { + csConfig.Cscli.SimulationConfig.Simulation = new(bool) + *csConfig.Cscli.SimulationConfig.Simulation = true + csConfig.Cscli.SimulationConfig.Exclusions = []string{} + + if err := dumpSimulationFile(); err != nil { + log.Fatalf("unable to dump simulation file: %s", err) + } + + log.Printf("global simulation: enabled") + + return nil +} + +func dumpSimulationFile() error { + newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) + if err != nil { + return fmt.Errorf("unable to marshal simulation configuration: %s", err) + } + err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0644) + if err != nil { + return fmt.Errorf("write simulation config in '%s' failed: %s", 
csConfig.ConfigPaths.SimulationFilePath, err) + } + log.Debugf("updated simulation file %s", csConfig.ConfigPaths.SimulationFilePath) + + return nil +} + +func disableGlobalSimulation() error { + csConfig.Cscli.SimulationConfig.Simulation = new(bool) + *csConfig.Cscli.SimulationConfig.Simulation = false + + csConfig.Cscli.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) + if err != nil { + return fmt.Errorf("unable to marshal new simulation configuration: %s", err) + } + err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0644) + if err != nil { + return fmt.Errorf("unable to write new simulation config in '%s' : %s", csConfig.ConfigPaths.SimulationFilePath, err) + } + + log.Printf("global simulation: disabled") + return nil +} + +func simulationStatus() error { + if csConfig.Cscli.SimulationConfig == nil { + log.Printf("global simulation: disabled (configuration file is missing)") + return nil + } + if *csConfig.Cscli.SimulationConfig.Simulation { + log.Println("global simulation: enabled") + if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + log.Println("Scenarios not in simulation mode :") + for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + log.Printf(" - %s", scenario) + } + } + } else { + log.Println("global simulation: disabled") + if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + log.Println("Scenarios in simulation mode :") + for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + log.Printf(" - %s", scenario) + } + } + } + return nil +} + +func NewSimulationCmds() *cobra.Command { + var cmdSimulation = &cobra.Command{ + Use: "simulation [command]", + Short: "Manage simulation status of scenarios", + Example: `cscli simulation status +cscli simulation enable crowdsecurity/ssh-bf +cscli simulation disable crowdsecurity/ssh-bf`, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := csConfig.LoadSimulation(); err != nil { + log.Fatalf(err.Error()) + } + if csConfig.Cscli == nil { + return fmt.Errorf("you must configure cli before using simulation") + } + if csConfig.Cscli.SimulationConfig == nil { + return fmt.Errorf("no simulation configured") + } + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + if cmd.Name() != "status" { + log.Infof(ReloadMessage()) + } + }, + } + cmdSimulation.Flags().SortFlags = false + cmdSimulation.PersistentFlags().SortFlags = false + + var forceGlobalSimulation bool + var cmdSimulationEnable = &cobra.Command{ + Use: "enable [scenario] [-global]", + Short: "Enable the simulation, globally or on specified scenarios", + Example: `cscli simulation enable`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + log.Info("Run 'sudo cscli hub update' to get the hub index") + log.Fatalf("Failed to get Hub index : %v", err) + } + + if len(args) > 0 { + for _, scenario := range args { + var item = cwhub.GetItem(cwhub.SCENARIOS, scenario) + if item == nil { + log.Errorf("'%s' doesn't exist or is not a scenario", scenario) + continue + } + if !item.Installed { + log.Warningf("'%s' isn't enabled", scenario) + } + isExcluded := inSlice(scenario, csConfig.Cscli.SimulationConfig.Exclusions) + if *csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + log.Warning("global simulation is already enabled") + 
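The four enable/disable branches around here all derive from one rule: the Exclusions list inverts its meaning depending on the global Simulation flag. A runnable distillation of that rule (function and variable names are mine, not part of the source):

    package main

    import "fmt"

    // simulated reports whether a scenario ends up in simulation mode,
    // mirroring the branch logic of 'cscli simulation enable/disable':
    // with global simulation on, Exclusions opts scenarios OUT of it;
    // with global simulation off, Exclusions opts scenarios IN.
    func simulated(globalSimulation, excluded bool) bool {
        if globalSimulation {
            return !excluded
        }
        return excluded
    }

    func main() {
        fmt.Println(simulated(true, false)) // true: covered by global simulation
        fmt.Println(simulated(true, true))  // false: excluded from global simulation
        fmt.Println(simulated(false, true)) // true: individually simulated
    }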
continue + } + if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { + log.Warningf("simulation for '%s' already enabled", scenario) + continue + } + if *csConfig.Cscli.SimulationConfig.Simulation && isExcluded { + if err := removeFromExclusion(scenario); err != nil { + log.Fatal(err) + } + log.Printf("simulation enabled for '%s'", scenario) + continue + } + if err := addToExclusion(scenario); err != nil { + log.Fatal(err) + } + log.Printf("simulation mode for '%s' enabled", scenario) + } + if err := dumpSimulationFile(); err != nil { + log.Fatalf("simulation enable: %s", err) + } + } else if forceGlobalSimulation { + if err := enableGlobalSimulation(); err != nil { + log.Fatalf("unable to enable global simulation mode : %s", err) + } + } else { + printHelp(cmd) + } + }, + } + cmdSimulationEnable.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Enable global simulation (reverse mode)") + cmdSimulation.AddCommand(cmdSimulationEnable) + + var cmdSimulationDisable = &cobra.Command{ + Use: "disable [scenario]", + Short: "Disable the simulation mode. Disable only specified scenarios", + Example: `cscli simulation disable`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if len(args) > 0 { + for _, scenario := range args { + isExcluded := inSlice(scenario, csConfig.Cscli.SimulationConfig.Exclusions) + if !*csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + log.Warningf("%s isn't in simulation mode", scenario) + continue + } + if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { + if err := removeFromExclusion(scenario); err != nil { + log.Fatal(err) + } + log.Printf("simulation mode for '%s' disabled", scenario) + continue + } + if isExcluded { + log.Warningf("simulation mode is enabled but is already disabled for '%s'", scenario) + continue + } + if err := addToExclusion(scenario); err != nil { + log.Fatal(err) + } + log.Printf("simulation mode for '%s' disabled", scenario) + } + if err := dumpSimulationFile(); err != nil { + log.Fatalf("simulation disable: %s", err) + } + } else if forceGlobalSimulation { + if err := disableGlobalSimulation(); err != nil { + log.Fatalf("unable to disable global simulation mode : %s", err) + } + } else { + printHelp(cmd) + } + }, + } + cmdSimulationDisable.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Disable global simulation (reverse mode)") + cmdSimulation.AddCommand(cmdSimulationDisable) + + var cmdSimulationStatus = &cobra.Command{ + Use: "status", + Short: "Show simulation mode status", + Example: `cscli simulation status`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := simulationStatus(); err != nil { + log.Fatal(err) + } + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + }, + } + cmdSimulation.AddCommand(cmdSimulationStatus) + + return cmdSimulation +} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go new file mode 100644 index 0000000..84c0e27 --- /dev/null +++ b/cmd/crowdsec-cli/support.go @@ -0,0 +1,407 @@ +package main + +import ( + "archive/zip" + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/blackfireio/osinfo" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" +
"github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + SUPPORT_METRICS_HUMAN_PATH = "metrics/metrics.human" + SUPPORT_METRICS_PROMETHEUS_PATH = "metrics/metrics.prometheus" + SUPPORT_VERSION_PATH = "version.txt" + SUPPORT_OS_INFO_PATH = "osinfo.txt" + SUPPORT_PARSERS_PATH = "hub/parsers.txt" + SUPPORT_SCENARIOS_PATH = "hub/scenarios.txt" + SUPPORT_COLLECTIONS_PATH = "hub/collections.txt" + SUPPORT_POSTOVERFLOWS_PATH = "hub/postoverflows.txt" + SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" + SUPPORT_AGENTS_PATH = "lapi/agents.txt" + SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" + SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" + SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/" + SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" +) + +func collectMetrics() ([]byte, []byte, error) { + log.Info("Collecting prometheus metrics") + err := csConfig.LoadPrometheus() + if err != nil { + return nil, nil, err + } + + if csConfig.Cscli.PrometheusUrl == "" { + log.Warn("No Prometheus URL configured, metrics will not be collected") + return nil, nil, fmt.Errorf("prometheus_uri is not set") + } + + humanMetrics := bytes.NewBuffer(nil) + err = FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl+"/metrics", "human") + + if err != nil { + return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err) + } + + req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl+"/metrics", nil) + if err != nil { + return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %s", err) + } + client := &http.Client{} + resp, err := client.Do(req) + + if err != nil { + return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err) + } + + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %s", err) + } + + return humanMetrics.Bytes(), body, nil +} + +func collectVersion() []byte { + log.Info("Collecting version") + return []byte(cwversion.ShowStr()) +} + +func collectOSInfo() ([]byte, error) { + log.Info("Collecting OS info") + info, err := osinfo.GetOSInfo() + + if err != nil { + return nil, err + } + + w := bytes.NewBuffer(nil) + w.WriteString(fmt.Sprintf("Architecture: %s\n", info.Architecture)) + w.WriteString(fmt.Sprintf("Family: %s\n", info.Family)) + w.WriteString(fmt.Sprintf("ID: %s\n", info.ID)) + w.WriteString(fmt.Sprintf("Name: %s\n", info.Name)) + w.WriteString(fmt.Sprintf("Codename: %s\n", info.Codename)) + w.WriteString(fmt.Sprintf("Version: %s\n", info.Version)) + w.WriteString(fmt.Sprintf("Build: %s\n", info.Build)) + + return w.Bytes(), nil +} + +func initHub() error { + if err := csConfig.LoadHub(); err != nil { + return fmt.Errorf("cannot load hub: %s", err) + } + if csConfig.Hub == nil { + return fmt.Errorf("hub not configured") + } + + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("cannot set hub branch: %s", err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + return fmt.Errorf("no hub index found: %s", err) + } + return nil +} + +func collectHubItems(itemType string) []byte { + out := bytes.NewBuffer(nil) + log.Infof("Collecting %s list", itemType) + ListItems(out, []string{itemType}, []string{}, false, true, all) + return out.Bytes() +} + +func collectBouncers(dbClient *database.Client) ([]byte, error) { + out 
:= bytes.NewBuffer(nil) + err := getBouncers(out, dbClient) + if err != nil { + return nil, err + } + return out.Bytes(), nil +} + +func collectAgents(dbClient *database.Client) ([]byte, error) { + out := bytes.NewBuffer(nil) + err := getAgents(out, dbClient) + if err != nil { + return nil, err + } + return out.Bytes(), nil +} + +func collectAPIStatus(login string, password string, endpoint string, prefix string) []byte { + if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { + return []byte("No agent credentials found, are we LAPI ?") + } + pwd := strfmt.Password(password) + apiurl, err := url.Parse(endpoint) + + if err != nil { + return []byte(fmt.Sprintf("cannot parse API URL: %s", err.Error())) + } + scenarios, err := cwhub.GetInstalledScenariosAsString() + if err != nil { + return []byte(fmt.Sprintf("could not collect scenarios: %s", err.Error())) + } + + Client, err = apiclient.NewDefaultClient(apiurl, + prefix, + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil) + if err != nil { + return []byte(fmt.Sprintf("could not init client: %s", err.Error())) + } + t := models.WatcherAuthRequest{ + MachineID: &login, + Password: &pwd, + Scenarios: scenarios, + } + + _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + return []byte(fmt.Sprintf("Could not authenticate to API: %s", err)) + } else { + return []byte("Successfully authenticated to LAPI") + } +} + +func collectCrowdsecConfig() []byte { + log.Info("Collecting crowdsec config") + config, err := os.ReadFile(*csConfig.FilePath) + if err != nil { + return []byte(fmt.Sprintf("could not read config file: %s", err)) + } + + r := regexp.MustCompile(`(\s+password:|\s+user:|\s+host:)\s+.*`) + + return r.ReplaceAll(config, []byte("$1 ****REDACTED****")) +} + +func collectCrowdsecProfile() []byte { + log.Info("Collecting crowdsec profile") + config, err := os.ReadFile(csConfig.API.Server.ProfilesPath) + if err != nil { + return []byte(fmt.Sprintf("could not read profile file: %s", err)) + } + return config +} + +func collectAcquisitionConfig() map[string][]byte { + log.Info("Collecting acquisition config") + ret := make(map[string][]byte) + + for _, filename := range csConfig.Crowdsec.AcquisitionFiles { + fileContent, err := os.ReadFile(filename) + if err != nil { + ret[filename] = []byte(fmt.Sprintf("could not read file: %s", err)) + } else { + ret[filename] = fileContent + } + } + + return ret +} + +func NewSupportCmd() *cobra.Command { + var cmdSupport = &cobra.Command{ + Use: "support [action]", + Short: "Provide commands to help during support", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return nil + }, + } + + var outFile string + + cmdDump := &cobra.Command{ + Use: "dump", + Short: "Dump all your configuration to a zip file for easier support", + Long: `Dump the following information: +- Crowdsec version +- OS version +- Installed collections list +- Installed parsers list +- Installed scenarios list +- Installed postoverflows list +- Bouncers list +- Machines list +- CAPI status +- LAPI status +- Crowdsec config (sensitive information such as usernames and passwords is redacted) +- Crowdsec metrics`, + Example: `cscli support dump +cscli support dump -f /tmp/crowdsec-support.zip +`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + var err error + var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool + infos := map[string][]byte{ +
SUPPORT_VERSION_PATH: collectVersion(), + } + + if outFile == "" { + outFile = "/tmp/crowdsec-support.zip" + } + + dbClient, err = database.NewClient(csConfig.DbConfig) + + if err != nil { + log.Warnf("Could not connect to database: %s", err) + skipDB = true + infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) + infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) + } + + if err := csConfig.LoadAPIServer(); err != nil { + log.Warnf("could not load LAPI, skipping CAPI check") + skipLAPI = true + infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error()) + } + + if err := csConfig.LoadCrowdsec(); err != nil { + log.Warnf("could not load agent config, skipping crowdsec config check") + skipAgent = true + } + + err = initHub() + + if err != nil { + log.Warn("Could not init hub, running on LAPI ? Hub-related information will not be collected") + skipHub = true + infos[SUPPORT_PARSERS_PATH] = []byte(err.Error()) + infos[SUPPORT_SCENARIOS_PATH] = []byte(err.Error()) + infos[SUPPORT_POSTOVERFLOWS_PATH] = []byte(err.Error()) + infos[SUPPORT_COLLECTIONS_PATH] = []byte(err.Error()) + } + + if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { + log.Warn("no agent credentials found, skipping LAPI connectivity check") + if _, ok := infos[SUPPORT_LAPI_STATUS_PATH]; ok { + infos[SUPPORT_LAPI_STATUS_PATH] = append(infos[SUPPORT_LAPI_STATUS_PATH], []byte("\nNo LAPI credentials found")...) + } + skipLAPI = true + } + + if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient.Credentials == nil { + log.Warn("no CAPI credentials found, skipping CAPI connectivity check") + skipCAPI = true + } + + infos[SUPPORT_METRICS_HUMAN_PATH], infos[SUPPORT_METRICS_PROMETHEUS_PATH], err = collectMetrics() + if err != nil { + log.Warnf("could not collect prometheus metrics information: %s", err) + infos[SUPPORT_METRICS_HUMAN_PATH] = []byte(err.Error()) + infos[SUPPORT_METRICS_PROMETHEUS_PATH] = []byte(err.Error()) + } + + infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo() + + if err != nil { + log.Warnf("could not collect OS information: %s", err) + infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error()) + } + + infos[SUPPORT_CROWDSEC_CONFIG_PATH] = collectCrowdsecConfig() + + if !skipHub { + infos[SUPPORT_PARSERS_PATH] = collectHubItems(cwhub.PARSERS) + infos[SUPPORT_SCENARIOS_PATH] = collectHubItems(cwhub.SCENARIOS) + infos[SUPPORT_POSTOVERFLOWS_PATH] = collectHubItems(cwhub.PARSERS_OVFLW) + infos[SUPPORT_COLLECTIONS_PATH] = collectHubItems(cwhub.COLLECTIONS) + } + + if !skipDB { + infos[SUPPORT_BOUNCERS_PATH], err = collectBouncers(dbClient) + if err != nil { + log.Warnf("could not collect bouncers information: %s", err) + infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) + } + + infos[SUPPORT_AGENTS_PATH], err = collectAgents(dbClient) + if err != nil { + log.Warnf("could not collect agents information: %s", err) + infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) + } + } + + if !skipCAPI { + log.Info("Collecting CAPI status") + infos[SUPPORT_CAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Server.OnlineClient.Credentials.Login, + csConfig.API.Server.OnlineClient.Credentials.Password, + csConfig.API.Server.OnlineClient.Credentials.URL, + CAPIURLPrefix) + } + + if !skipLAPI { + log.Info("Collecting LAPI status") + infos[SUPPORT_LAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Client.Credentials.Login, + csConfig.API.Client.Credentials.Password, + csConfig.API.Client.Credentials.URL, + LAPIURLPrefix) + infos[SUPPORT_CROWDSEC_PROFILE_PATH] = collectCrowdsecProfile() + } + + if !skipAgent { + + acquis :=
collectAcquisitionConfig() + + for filename, content := range acquis { + fname := strings.ReplaceAll(filename, string(filepath.Separator), "___") + infos[SUPPORT_ACQUISITION_CONFIG_BASE_PATH+fname] = content + } + } + + w := bytes.NewBuffer(nil) + zipWriter := zip.NewWriter(w) + + for filename, data := range infos { + fw, err := zipWriter.Create(filename) + if err != nil { + log.Errorf("Could not add zip entry for %s: %s", filename, err) + continue + } + if _, err := fw.Write([]byte(types.StripAnsiString(string(data)))); err != nil { + log.Errorf("could not write zip entry for %s: %s", filename, err) + } + } + err = zipWriter.Close() + if err != nil { + log.Fatalf("could not finalize zip file: %s", err) + } + err = os.WriteFile(outFile, w.Bytes(), 0600) + if err != nil { + log.Fatalf("could not write zip file to %s: %s", outFile, err) + } + log.Infof("Wrote zip file to %s", outFile) + }, + } + cmdDump.Flags().StringVarP(&outFile, "outFile", "f", "", "File to dump the information to") + cmdSupport.AddCommand(cmdDump) + + return cmdSupport +} diff --git a/cmd/crowdsec-cli/tables.go b/cmd/crowdsec-cli/tables.go new file mode 100644 index 0000000..2c3173d --- /dev/null +++ b/cmd/crowdsec-cli/tables.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io" + "os" + + "github.com/aquasecurity/table" + isatty "github.com/mattn/go-isatty" +) + +func shouldWeColorize() bool { + if csConfig.Cscli.Color == "yes" { + return true + } + if csConfig.Cscli.Color == "no" { + return false + } + return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) +} + +func newTable(out io.Writer) *table.Table { + if out == nil { + panic("newTable: out is nil") + } + t := table.New(out) + if shouldWeColorize() { + t.SetLineStyle(table.StyleBrightBlack) + t.SetHeaderStyle(table.StyleItalic) + } + + if shouldWeColorize() { + t.SetDividers(table.UnicodeRoundedDividers) + } else { + t.SetDividers(table.ASCIIDividers) + } + + return t +} + +func newLightTable(out io.Writer) *table.Table { + if out == nil { + panic("newLightTable: out is nil") + } + t := newTable(out) + t.SetRowLines(false) + t.SetBorderLeft(false) + t.SetBorderRight(false) + // This leaves three spaces between columns: + // left padding, invisible border, right padding + // There is no way to make two spaces without + // a SetColumnLines() method, but it's close enough.
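newTable and newLightTable wrap github.com/aquasecurity/table with the color and divider policy above; the concrete header/row calls appear later in utils_table.go. A minimal standalone use of that library, with static strings instead of csConfig-driven data:

    package main

    import (
        "os"

        "github.com/aquasecurity/table"
    )

    func main() {
        t := table.New(os.Stdout)             // same constructor newTable wraps
        t.SetHeaders("Name", "Status")        // same calls utils_table.go makes
        t.AddRow("crowdsecurity/ssh-bf", "enabled")
        t.Render()
    }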
+ t.SetPadding(1) + + if shouldWeColorize() { + t.SetDividers(table.Dividers{ + ALL: "─", + NES: "─", + NSW: "─", + NEW: "─", + ESW: "─", + NE: "─", + NW: "─", + SW: "─", + ES: "─", + EW: "─", + NS: " ", + }) + } else { + t.SetDividers(table.Dividers{ + ALL: "-", + NES: "-", + NSW: "-", + NEW: "-", + ESW: "-", + NE: "-", + NW: "-", + SW: "-", + ES: "-", + EW: "-", + NS: " ", + }) + } + return t +} + +func renderTableTitle(out io.Writer, title string) { + if out == nil { + panic("renderTableTitle: out is nil") + } + if title == "" { + return + } + fmt.Fprintln(out, title) +} diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go new file mode 100644 index 0000000..89c20f6 --- /dev/null +++ b/cmd/crowdsec-cli/utils.go @@ -0,0 +1,734 @@ +package main + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "io" + "math" + "net" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/texttheater/golang-levenshtein/levenshtein" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const MaxDistance = 7 + +func printHelp(cmd *cobra.Command) { + err := cmd.Help() + if err != nil { + log.Fatalf("unable to print help(): %s", err) + } +} + +func inSlice(s string, slice []string) bool { + for _, str := range slice { + if s == str { + return true + } + } + return false +} + +func indexOf(s string, slice []string) int { + for i, elem := range slice { + if s == elem { + return i + } + } + return -1 +} + +func LoadHub() error { + if err := csConfig.LoadHub(); err != nil { + log.Fatal(err) + } + if csConfig.Hub == nil { + return fmt.Errorf("unable to load hub") + } + + if err := cwhub.SetHubBranch(); err != nil { + log.Warningf("unable to set hub branch (%s), default to master", err) + } + + if err := cwhub.GetHubIdx(csConfig.Hub); err != nil { + return fmt.Errorf("Failed to get Hub index : '%w'. 
Run 'sudo cscli hub update' to get the hub index", err) + } + + return nil +} + +func Suggest(itemType string, baseItem string, suggestItem string, score int, ignoreErr bool) { + errMsg := "" + if score < MaxDistance { + errMsg = fmt.Sprintf("unable to find %s '%s', did you mean %s ?", itemType, baseItem, suggestItem) + } else { + errMsg = fmt.Sprintf("unable to find %s '%s'", itemType, baseItem) + } + if ignoreErr { + log.Error(errMsg) + } else { + log.Fatalf(errMsg) + } +} + +func GetDistance(itemType string, itemName string) (*cwhub.Item, int) { + allItems := make([]string, 0) + nearestScore := 100 + nearestItem := &cwhub.Item{} + hubItems := cwhub.GetHubStatusForItemType(itemType, "", true) + for _, item := range hubItems { + allItems = append(allItems, item.Name) + } + + for _, s := range allItems { + d := levenshtein.DistanceForStrings([]rune(itemName), []rune(s), levenshtein.DefaultOptions) + if d < nearestScore { + nearestScore = d + nearestItem = cwhub.GetItem(itemType, s) + } + } + return nearestItem, nearestScore +} + +func compAllItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if err := LoadHub(); err != nil { + return nil, cobra.ShellCompDirectiveDefault + } + + comp := make([]string, 0) + hubItems := cwhub.GetHubStatusForItemType(itemType, "", true) + for _, item := range hubItems { + if !inSlice(item.Name, args) && strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) + } + } + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + return comp, cobra.ShellCompDirectiveNoFileComp +} + +func compInstalledItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if err := LoadHub(); err != nil { + return nil, cobra.ShellCompDirectiveDefault + } + + var items []string + var err error + switch itemType { + case cwhub.PARSERS: + items, err = cwhub.GetInstalledParsersAsString() + case cwhub.SCENARIOS: + items, err = cwhub.GetInstalledScenariosAsString() + case cwhub.PARSERS_OVFLW: + items, err = cwhub.GetInstalledPostOverflowsAsString() + case cwhub.COLLECTIONS: + items, err = cwhub.GetInstalledCollectionsAsString() + default: + return nil, cobra.ShellCompDirectiveDefault + } + + if err != nil { + cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) + return nil, cobra.ShellCompDirectiveDefault + } + comp := make([]string, 0) + + if toComplete != "" { + for _, item := range items { + if strings.Contains(item, toComplete) { + comp = append(comp, item) + } + } + } else { + comp = items + } + + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp +} + +func ListItems(out io.Writer, itemTypes []string, args []string, showType bool, showHeader bool, all bool) { + var hubStatusByItemType = make(map[string][]cwhub.ItemHubStatus) + + for _, itemType := range itemTypes { + itemName := "" + if len(args) == 1 { + itemName = args[0] + } + hubStatusByItemType[itemType] = cwhub.GetHubStatusForItemType(itemType, itemName, all) + } + + if csConfig.Cscli.Output == "human" { + for _, itemType := range itemTypes { + var statuses []cwhub.ItemHubStatus + var ok bool + if statuses, ok = hubStatusByItemType[itemType]; !ok { + log.Errorf("unknown item type: %s", itemType) + continue + } + listHubItemTable(out, "\n"+strings.ToUpper(itemType), statuses) + } + } else if csConfig.Cscli.Output == "json" { + x, err := json.MarshalIndent(hubStatusByItemType, "", " ") + if err != nil { + 
log.Fatalf("failed to unmarshal") + } + out.Write(x) + } else if csConfig.Cscli.Output == "raw" { + csvwriter := csv.NewWriter(out) + if showHeader { + header := []string{"name", "status", "version", "description"} + if showType { + header = append(header, "type") + } + err := csvwriter.Write(header) + if err != nil { + log.Fatalf("failed to write header: %s", err) + } + + } + for _, itemType := range itemTypes { + var statuses []cwhub.ItemHubStatus + var ok bool + if statuses, ok = hubStatusByItemType[itemType]; !ok { + log.Errorf("unknown item type: %s", itemType) + continue + } + for _, status := range statuses { + if status.LocalVersion == "" { + status.LocalVersion = "n/a" + } + row := []string{ + status.Name, + status.Status, + status.LocalVersion, + status.Description, + } + if showType { + row = append(row, itemType) + } + err := csvwriter.Write(row) + if err != nil { + log.Fatalf("failed to write raw output : %s", err) + } + } + } + csvwriter.Flush() + } +} + +func InspectItem(name string, objecitemType string) { + + hubItem := cwhub.GetItem(objecitemType, name) + if hubItem == nil { + log.Fatalf("unable to retrieve item.") + } + var b []byte + var err error + switch csConfig.Cscli.Output { + case "human", "raw": + b, err = yaml.Marshal(*hubItem) + if err != nil { + log.Fatalf("unable to marshal item : %s", err) + } + case "json": + b, err = json.MarshalIndent(*hubItem, "", " ") + if err != nil { + log.Fatalf("unable to marshal item : %s", err) + } + } + fmt.Printf("%s", string(b)) + if csConfig.Cscli.Output == "json" || csConfig.Cscli.Output == "raw" { + return + } + + if prometheusURL == "" { + //This is technically wrong to do this, as the prometheus section contains a listen address, not an URL to query prometheus + //But for ease of use, we will use the listen address as the prometheus URL because it will be 127.0.0.1 in the default case + listenAddr := csConfig.Prometheus.ListenAddr + if listenAddr == "" { + listenAddr = "127.0.0.1" + } + listenPort := csConfig.Prometheus.ListenPort + if listenPort == 0 { + listenPort = 6060 + } + prometheusURL = fmt.Sprintf("http://%s:%d/metrics", listenAddr, listenPort) + log.Debugf("No prometheus URL provided using: %s", prometheusURL) + } + + fmt.Printf("\nCurrent metrics : \n") + ShowMetrics(hubItem) +} + +func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { + + /*if a range is provided, change the scope*/ + if *ipRange != "" { + _, _, err := net.ParseCIDR(*ipRange) + if err != nil { + return fmt.Errorf("%s isn't a valid range", *ipRange) + } + } + if *ip != "" { + ipRepr := net.ParseIP(*ip) + if ipRepr == nil { + return fmt.Errorf("%s isn't a valid ip", *ip) + } + } + + //avoid confusion on scope (ip vs Ip and range vs Range) + switch strings.ToLower(*scope) { + case "ip": + *scope = types.Ip + case "range": + *scope = types.Range + case "country": + *scope = types.Country + case "as": + *scope = types.AS + } + return nil +} + +func ShowMetrics(hubItem *cwhub.Item) { + switch hubItem.Type { + case cwhub.PARSERS: + metrics := GetParserMetric(prometheusURL, hubItem.Name) + parserMetricsTable(color.Output, hubItem.Name, metrics) + case cwhub.SCENARIOS: + metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + scenarioMetricsTable(color.Output, hubItem.Name, metrics) + case cwhub.COLLECTIONS: + for _, item := range hubItem.Parsers { + metrics := GetParserMetric(prometheusURL, item) + parserMetricsTable(color.Output, item, metrics) + } + for _, item := range hubItem.Scenarios { + metrics := 
// GetParserMetric is a complete rip from prom2json +func GetParserMetric(url string, itemName string) map[string]map[string]int { + stats := make(map[string]map[string]int) + + result := GetPrometheusMetric(url) + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { + metric, ok := m.(prom2json.Metric) + if !ok { + log.Debugf("failed to convert metric to prom2json.Metric") + continue + } + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + if name != itemName { + continue + } + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric %v", metric.Labels) + } else { + if srctype, ok := metric.Labels["type"]; ok { + source = srctype + ":" + source + } + } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + continue + } + ival := int(fval) + + switch fam.Name { + case "cs_reader_hits_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + stats[source]["parsed"] = 0 + stats[source]["reads"] = 0 + stats[source]["unparsed"] = 0 + stats[source]["hits"] = 0 + } + stats[source]["reads"] += ival + case "cs_parser_hits_ok_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["parsed"] += ival + case "cs_parser_hits_ko_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["unparsed"] += ival + case "cs_node_hits_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["hits"] += ival + case "cs_node_hits_ok_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["parsed"] += ival + case "cs_node_hits_ko_total": + if _, ok := stats[source]; !ok { + stats[source] = make(map[string]int) + } + stats[source]["unparsed"] += ival + default: + continue + } + } + } + return stats +} + +func GetScenarioMetric(url string, itemName string) map[string]int { + stats := make(map[string]int) + + stats["instantiation"] = 0 + stats["curr_count"] = 0 + stats["overflow"] = 0 + stats["pour"] = 0 + stats["underflow"] = 0 + + result := GetPrometheusMetric(url) + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { + metric, ok := m.(prom2json.Metric) + if !ok { + log.Debugf("failed to convert metric to prom2json.Metric") + continue + } + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + if name != itemName { + continue + } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + continue + } + ival := int(fval) + + switch fam.Name { + case "cs_bucket_created_total": + stats["instantiation"] += ival + case "cs_buckets": + stats["curr_count"] += ival + case
"cs_bucket_overflowed_total": + stats["overflow"] += ival + case "cs_bucket_poured_total": + stats["pour"] += ival + case "cs_bucket_underflowed_total": + stats["underflow"] += ival + default: + continue + } + } + } + return stats +} + +// it's a rip of the cli version, but in silent-mode +func silenceInstallItem(name string, obtype string) (string, error) { + var item = cwhub.GetItem(obtype, name) + if item == nil { + return "", fmt.Errorf("error retrieving item") + } + it := *item + if downloadOnly && it.Downloaded && it.UpToDate { + return fmt.Sprintf("%s is already downloaded and up-to-date", it.Name), nil + } + it, err := cwhub.DownloadLatest(csConfig.Hub, it, forceAction, false) + if err != nil { + return "", fmt.Errorf("error while downloading %s : %v", it.Name, err) + } + if err := cwhub.AddItem(obtype, it); err != nil { + return "", err + } + + if downloadOnly { + return fmt.Sprintf("Downloaded %s to %s", it.Name, csConfig.Cscli.HubDir+"/"+it.RemotePath), nil + } + it, err = cwhub.EnableItem(csConfig.Hub, it) + if err != nil { + return "", fmt.Errorf("error while enabling %s : %v", it.Name, err) + } + if err := cwhub.AddItem(obtype, it); err != nil { + return "", err + } + return fmt.Sprintf("Enabled %s", it.Name), nil +} + +func GetPrometheusMetric(url string) []*prom2json.Family { + mfChan := make(chan *dto.MetricFamily, 1024) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. + transport.ResponseHeaderTimeout = time.Minute + + go func() { + defer types.CatchPanic("crowdsec/GetPrometheusMetric") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + log.Fatalf("failed to fetch prometheus metrics : %v", err) + } + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) + + return result +} + +func RestoreHub(dirPath string) error { + var err error + + if err := csConfig.LoadHub(); err != nil { + return err + } + if err := cwhub.SetHubBranch(); err != nil { + return fmt.Errorf("error while setting hub branch: %s", err) + } + + for _, itype := range cwhub.ItemTypes { + itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) + if _, err = os.Stat(itemDirectory); err != nil { + log.Infof("no %s in backup", itype) + continue + } + /*restore the upstream items*/ + upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) + file, err := os.ReadFile(upstreamListFN) + if err != nil { + return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) + } + var upstreamList []string + err = json.Unmarshal(file, &upstreamList) + if err != nil { + return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) + } + for _, toinstall := range upstreamList { + label, err := silenceInstallItem(toinstall, itype) + if err != nil { + log.Errorf("Error while installing %s : %s", toinstall, err) + } else if label != "" { + log.Infof("Installed %s : %s", toinstall, label) + } else { + log.Printf("Installed %s : ok", toinstall) + } + } + + /*restore the local and tainted items*/ + files, err := os.ReadDir(itemDirectory) + if err != nil { + return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err) + } + for _, file 
:= range files { + //this was the upstream data + if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { + continue + } + if itype == cwhub.PARSERS || itype == cwhub.PARSERS_OVFLW { + //we expect a stage here + if !file.IsDir() { + continue + } + stage := file.Name() + stagedir := fmt.Sprintf("%s/%s/%s/", csConfig.ConfigPaths.ConfigDir, itype, stage) + log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) + if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { + return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err) + } + /*find items*/ + ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") + if err != nil { + return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err) + } + //finally copy item + for _, tfile := range ifiles { + log.Infof("Going to restore local/tainted [%s]", tfile.Name()) + sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) + destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) + if err = types.CopyFile(sourceFile, destinationFile); err != nil { + return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + } + log.Infof("restored %s to %s", sourceFile, destinationFile) + } + } else { + log.Infof("Going to restore local/tainted [%s]", file.Name()) + sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) + destinationFile := fmt.Sprintf("%s/%s/%s", csConfig.ConfigPaths.ConfigDir, itype, file.Name()) + if err = types.CopyFile(sourceFile, destinationFile); err != nil { + return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + } + log.Infof("restored %s to %s", sourceFile, destinationFile) + } + + } + } + return nil +} + +func BackupHub(dirPath string) error { + var err error + var itemDirectory string + var upstreamParsers []string + + for _, itemType := range cwhub.ItemTypes { + clog := log.WithFields(log.Fields{ + "type": itemType, + }) + itemMap := cwhub.GetItemMap(itemType) + if itemMap == nil { + clog.Infof("No %s to backup.", itemType) + continue + } + itemDirectory = fmt.Sprintf("%s/%s/", dirPath, itemType) + if err := os.MkdirAll(itemDirectory, os.ModePerm); err != nil { + return fmt.Errorf("error while creating %s : %s", itemDirectory, err) + } + upstreamParsers = []string{} + for k, v := range itemMap { + clog = clog.WithFields(log.Fields{ + "file": v.Name, + }) + if !v.Installed { //only backup installed ones + clog.Debugf("[%s] : not installed", k) + continue + } + + //for the local/tainted ones, we backup the full file + if v.Tainted || v.Local || !v.UpToDate { + //we need to backup stages for parsers + if itemType == cwhub.PARSERS || itemType == cwhub.PARSERS_OVFLW { + fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) + if err := os.MkdirAll(fstagedir, os.ModePerm); err != nil { + return fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err) + } + } + clog.Debugf("[%s] : backing up file (tainted:%t local:%t up-to-date:%t)", k, v.Tainted, v.Local, v.UpToDate) + tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) + if err = types.CopyFile(v.LocalPath, tfile); err != nil { + return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.LocalPath, tfile, err) + } + clog.Infof("local/tainted saved %s to %s", v.LocalPath, tfile) + continue + } + clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.UpToDate) + clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.UpToDate) + upstreamParsers = append(upstreamParsers, v.Name) + }
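For hub-managed items, BackupHub stores only the name list serialized just below, while tainted and local files are copied verbatim; RestoreHub later replays that list through silenceInstallItem and re-downloads the content from the hub. A standalone sketch of the write/read roundtrip, with illustrative names and paths:

    package main

    import (
        "encoding/json"
        "log"
        "os"
    )

    func main() {
        // backup: hub-managed items are stored as names only
        names := []string{"crowdsecurity/ssh-bf", "crowdsecurity/http-probing"}
        data, err := json.MarshalIndent(names, "", " ")
        if err != nil {
            log.Fatal(err)
        }
        path := "/tmp/upstream-scenarios.json" // illustrative path
        if err := os.WriteFile(path, data, 0644); err != nil {
            log.Fatal(err)
        }

        // restore: read the list back and reinstall each item from the hub
        raw, err := os.ReadFile(path)
        if err != nil {
            log.Fatal(err)
        }
        var restored []string
        if err := json.Unmarshal(raw, &restored); err != nil {
            log.Fatal(err)
        }
        log.Printf("would reinstall %d items from the hub", len(restored))
    }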
//write the upstream items + upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) + upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") + if err != nil { + return fmt.Errorf("failed marshaling upstream parsers : %s", err) + } + err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0644) + if err != nil { + return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err) + } + clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) + } + + return nil +} + +type unit struct { + value int64 + symbol string +} + +var ranges = []unit{ + { + value: 1e18, + symbol: "E", + }, + { + value: 1e15, + symbol: "P", + }, + { + value: 1e12, + symbol: "T", + }, + { + value: 1e9, + symbol: "G", + }, + { + value: 1e6, + symbol: "M", + }, + { + value: 1e3, + symbol: "k", + }, + { + value: 1, + symbol: "", + }, +} + +func formatNumber(num int) string { + goodUnit := unit{} + for _, u := range ranges { + if int64(num) >= u.value { + goodUnit = u + break + } + } + + if goodUnit.value == 1 { + return fmt.Sprintf("%d%s", num, goodUnit.symbol) + } + + res := math.Round(float64(num)/float64(goodUnit.value)*100) / 100 + return fmt.Sprintf("%.2f%s", res, goodUnit.symbol) +} diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go new file mode 100644 index 0000000..aef1e94 --- /dev/null +++ b/cmd/crowdsec-cli/utils_table.go @@ -0,0 +1,66 @@ +package main + +import ( + "fmt" + "io" + + "github.com/aquasecurity/table" + "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func listHubItemTable(out io.Writer, title string, statuses []cwhub.ItemHubStatus) { + t := newLightTable(out) + t.SetHeaders("Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path") + t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + for _, status := range statuses { + t.AddRow(status.Name, status.UTF8_Status, status.LocalVersion, status.LocalPath) + } + renderTableTitle(out, title) + t.Render() +} + +func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int) { + if metrics["instantiation"] == 0 { + return + } + t := newTable(out) + t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") + + t.AddRow( + fmt.Sprintf("%d", metrics["curr_count"]), + fmt.Sprintf("%d", metrics["overflow"]), + fmt.Sprintf("%d", metrics["instantiation"]), + fmt.Sprintf("%d", metrics["pour"]), + fmt.Sprintf("%d", metrics["underflow"]), + ) + + renderTableTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) + t.Render() +} + +func parserMetricsTable(out io.Writer, itemName string, metrics map[string]map[string]int) { + skip := true + t := newTable(out) + t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + + for source, stats := range metrics { + if stats["hits"] > 0 { + t.AddRow( + source, + fmt.Sprintf("%d", stats["hits"]), + fmt.Sprintf("%d", stats["parsed"]), + fmt.Sprintf("%d", stats["unparsed"]), + ) + skip = false + } + } + + if !skip { + renderTableTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) + t.Render() + } +} diff --git a/cmd/crowdsec/Makefile b/cmd/crowdsec/Makefile new file mode 100644 index 0000000..81ef562 --- /dev/null +++ b/cmd/crowdsec/Makefile @@ -0,0 +1,76 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go
parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +CROWDSEC_BIN = crowdsec$(EXT) +# names longer than 15 chars break 'pgrep' +CROWDSEC_BIN_COVER = $(CROWDSEC_BIN).cover +PREFIX ?= "/" +CFG_PREFIX = $(PREFIX)"/etc/crowdsec/config/" +BIN_PREFIX = $(PREFIX)"/usr/local/bin/" +DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/" +PID_DIR = $(PREFIX)"/var/run/" + +SYSTEMD_PATH_FILE = "/etc/systemd/system/crowdsec.service" + +.PHONY: all +all: clean test build + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(CROWDSEC_BIN) + +build-bincover: clean + $(GOTEST) . -tags testrunmain -coverpkg=$(go list github.com/crowdsecurity/crowdsec/... | grep -v -e 'pkg/database' -e 'plugins/notifications' -e 'pkg/protobufs' -e 'pkg/cwversions' -e 'pkg/cstest' -e 'pkg/models') -covermode=atomic $(LD_OPTS) -c -o $(CROWDSEC_BIN_COVER) + +test: + $(GOTEST) $(LD_OPTS) -v ./... + +clean: + @$(RM) $(CROWDSEC_BIN) $(CROWDSEC_BIN).test $(CROWDSEC_BIN_COVER) $(WIN_IGNORE_ERR) + +.PHONY: install +install: install-conf install-bin + +.PHONY: install-conf +install-conf: + mkdir -p $(DATA_PREFIX) || exit + (cd ../../ && find ./data -type f -exec install -Dm 755 "{}" "$(DATA_PREFIX){}" \; && cd ./cmd/crowdsec) || exit + (cd ../../config && find ./patterns -type f -exec install -Dm 755 "{}" "$(CFG_PREFIX){}" \; && cd ../cmd/crowdsec) || exit + mkdir -p "$(CFG_PREFIX)" || exit + mkdir -p "$(CFG_PREFIX)/parsers" || exit + mkdir -p "$(CFG_PREFIX)/scenarios" || exit + mkdir -p "$(CFG_PREFIX)/postoverflows" || exit + mkdir -p "$(CFG_PREFIX)/collections" || exit + mkdir -p "$(CFG_PREFIX)/patterns" || exit + install -v -m 755 -D ../../config/prod.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/dev.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/acquis.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/profiles.yaml "$(CFG_PREFIX)" || exit + install -v -m 755 -D ../../config/api.yaml "$(CFG_PREFIX)" || exit + mkdir -p $(PID_DIR) || exit + PID=$(PID_DIR) DATA=$(DATA_PREFIX)"/data/" CFG=$(CFG_PREFIX) envsubst < ../../config/prod.yaml > $(CFG_PREFIX)"/default.yaml" + +.PHONY: install-bin +install-bin: + install -v -m 755 -D "$(CROWDSEC_BIN)" "$(BIN_PREFIX)/$(CROWDSEC_BIN)" || exit + +.PHONY: systemd +systemd: install + CFG=$(CFG_PREFIX) PID=$(PID_DIR) BIN=$(BIN_PREFIX)"/"$(CROWDSEC_BIN) envsubst < ../../config/crowdsec.service > "$(SYSTEMD_PATH_FILE)" + systemctl daemon-reload + +.PHONY: uninstall +uninstall: + $(RM) $(CFG_PREFIX) $(WIN_IGNORE_ERR) + $(RM) $(DATA_PREFIX) $(WIN_IGNORE_ERR) + $(RM) "$(BIN_PREFIX)/$(CROWDSEC_BIN)" $(WIN_IGNORE_ERR) + $(RM) "$(SYSTEMD_PATH_FILE)" $(WIN_IGNORE_ERR) diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go new file mode 100644 index 0000000..6f8e396 --- /dev/null +++ b/cmd/crowdsec/api.go @@ -0,0 +1,81 @@ +package main + +import ( + "runtime" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiserver" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { + apiServer, err := apiserver.NewServer(cConfig.API.Server) + if err != nil { + return nil, errors.Wrap(err, "unable to run local API") + } + + if hasPlugins(cConfig.API.Server.Profiles) { + log.Info("initiating plugin broker") + //On
windows, the plugins are always run as medium-integrity processes, so we don't care about plugin_config + if cConfig.PluginConfig == nil && runtime.GOOS != "windows" { + return nil, errors.New("plugins are enabled, but the plugin_config section is missing in the configuration") + } + if cConfig.ConfigPaths.NotificationDir == "" { + return nil, errors.New("plugins are enabled, but config_paths.notification_dir is not defined") + } + if cConfig.ConfigPaths.PluginDir == "" { + return nil, errors.New("plugins are enabled, but config_paths.plugin_dir is not defined") + } + err = pluginBroker.Init(cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) + if err != nil { + return nil, errors.Wrap(err, "unable to run local API") + } + log.Info("initiated plugin broker") + apiServer.AttachPluginBroker(&pluginBroker) + } + + err = apiServer.InitController() + if err != nil { + return nil, errors.Wrap(err, "unable to run local API") + } + + return apiServer, nil +} + +func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) { + apiTomb.Go(func() error { + defer types.CatchPanic("crowdsec/serveAPIServer") + go func() { + defer types.CatchPanic("crowdsec/runAPIServer") + log.Debugf("serving API after %s ms", time.Since(crowdsecT0)) + if err := apiServer.Run(apiReady); err != nil { + log.Fatalf(err.Error()) + } + }() + + pluginTomb.Go(func() error { + pluginBroker.Run(&pluginTomb) + return nil + }) + + <-apiTomb.Dying() // lock until go routine is dying + pluginTomb.Kill(nil) + log.Infof("serve: shutting down api server") + if err := apiServer.Shutdown(); err != nil { + return err + } + return nil + }) +} + +func hasPlugins(profiles []*csconfig.ProfileCfg) bool { + for _, profile := range profiles { + if len(profile.Notifications) != 0 { + return true + } + } + return false +} diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go new file mode 100644 index 0000000..84cf083 --- /dev/null +++ b/cmd/crowdsec/crowdsec.go @@ -0,0 +1,253 @@ +package main + +import ( + "fmt" + "os" + "sync" + "time" + + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +func initCrowdsec(cConfig *csconfig.Config) (*parser.Parsers, error) { + var err error + + // Populate cwhub package tools + if err := cwhub.GetHubIdx(cConfig.Hub); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load hub index : %s", err) + } + + // Start loading configs + csParsers := newParsers() + if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load parsers: %s", err) + } + + if err := LoadBuckets(cConfig); err != nil { + return &parser.Parsers{}, fmt.Errorf("Failed to load scenarios: %s", err) + } + + if err := LoadAcquisition(cConfig); err != nil { + return &parser.Parsers{}, fmt.Errorf("Error while loading acquisition config : %s", err) + } + return csParsers, nil +} + +func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers) error { + inputLineChan := make(chan types.Event) + inputEventChan := make(chan types.Event) + + //start go-routines for parsing, buckets pour and outputs. 
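serveAPIServer above and serveCrowdsec/runCrowdsec below coordinate every worker through shared tomb.Tomb values: Go spawns a tracked goroutine, Dying broadcasts shutdown, Kill requests it, and Wait collects the outcome. A self-contained sketch of that lifecycle using gopkg.in/tomb.v2 (the package these tombs come from), detached from the crowdsec types:

    package main

    import (
        "fmt"
        "time"

        "gopkg.in/tomb.v2"
    )

    func main() {
        var t tomb.Tomb
        for i := 0; i < 3; i++ {
            id := i
            t.Go(func() error { // every worker is tracked by the same tomb
                for {
                    select {
                    case <-t.Dying(): // cooperative shutdown signal
                        return nil
                    case <-time.After(50 * time.Millisecond):
                        fmt.Println("worker", id, "tick")
                    }
                }
            })
        }
        time.Sleep(200 * time.Millisecond)
        t.Kill(nil)           // ask all workers to stop, with no error
        fmt.Println(t.Wait()) // blocks until they return; prints <nil>
    }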
+ parserWg := &sync.WaitGroup{} + parsersTomb.Go(func() error { + parserWg.Add(1) + for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ { + parsersTomb.Go(func() error { + defer types.CatchPanic("crowdsec/runParse") + if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors + log.Fatalf("starting parse error : %s", err) + return err + } + return nil + }) + } + parserWg.Done() + return nil + }) + parserWg.Wait() + + bucketWg := &sync.WaitGroup{} + bucketsTomb.Go(func() error { + bucketWg.Add(1) + /*restore previous state as well if present*/ + if cConfig.Crowdsec.BucketStateFile != "" { + log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) + if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil { + return fmt.Errorf("unable to restore buckets : %s", err) + } + } + + for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ { + bucketsTomb.Go(func() error { + defer types.CatchPanic("crowdsec/runPour") + if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil { + log.Fatalf("starting pour error : %s", err) + return err + } + return nil + }) + } + bucketWg.Done() + return nil + }) + bucketWg.Wait() + + outputWg := &sync.WaitGroup{} + outputsTomb.Go(func() error { + outputWg.Add(1) + for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ { + outputsTomb.Go(func() error { + defer types.CatchPanic("crowdsec/runOutput") + if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials); err != nil { + log.Fatalf("starting outputs error : %s", err) + return err + } + return nil + }) + } + outputWg.Done() + return nil + }) + outputWg.Wait() + + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { + aggregated := false + if cConfig.Prometheus.Level == "aggregated" { + aggregated = true + } + if err := acquisition.GetMetrics(dataSources, aggregated); err != nil { + return errors.Wrap(err, "while fetching prometheus metrics for datasources.") + } + + } + log.Info("Starting processing data") + + if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { + log.Fatalf("starting acquisition error : %s", err) + return err + } + + return nil +} + +func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, agentReady chan bool) { + crowdsecTomb.Go(func() error { + defer types.CatchPanic("crowdsec/serveCrowdsec") + go func() { + defer types.CatchPanic("crowdsec/runCrowdsec") + // this logs every time, even at config reload + log.Debugf("running agent after %s", time.Since(crowdsecT0)) + agentReady <- true + if err := runCrowdsec(cConfig, parsers); err != nil { + log.Fatalf("unable to start crowdsec routines: %s", err) + } + }() + + /*we should stop in two cases : + - crowdsecTomb has been Killed() : it might be shutdown or reload, so stop + - acquisTomb is dead, it means that we were in "cat" mode and files are done reading, quit + */ + waitOnTomb() + log.Debugf("Shutting down crowdsec routines") + if err := ShutdownCrowdsecRoutines(); err != nil { + log.Fatalf("unable to shutdown crowdsec routines: %s", err) + } + log.Debugf("everything is dead, return crowdsecTomb") + if dumpStates { + dumpParserState() + dumpOverflowState() + dumpBucketsPour() + os.Exit(0) + } + return nil + }) +} + +func dumpBucketsPour() { + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder,
"bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + log.Fatalf("open: %s", err) + } + out, err := yaml.Marshal(leaky.BucketPourCache) + if err != nil { + log.Fatalf("marshal: %s", err) + } + b, err := fd.Write(out) + if err != nil { + log.Fatalf("write: %s", err) + } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { + log.Fatalf(" close: %s", err) + } +} + +func dumpParserState() { + + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + log.Fatalf("open: %s", err) + } + out, err := yaml.Marshal(parser.StageParseCache) + if err != nil { + log.Fatalf("marshal: %s", err) + } + b, err := fd.Write(out) + if err != nil { + log.Fatalf("write: %s", err) + } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { + log.Fatalf(" close: %s", err) + } +} + +func dumpOverflowState() { + + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + log.Fatalf("open: %s", err) + } + out, err := yaml.Marshal(bucketOverflows) + if err != nil { + log.Fatalf("marshal: %s", err) + } + b, err := fd.Write(out) + if err != nil { + log.Fatalf("write: %s", err) + } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { + log.Fatalf(" close: %s", err) + } +} + +func waitOnTomb() { + for { + select { + case <-acquisTomb.Dead(): + /*if it's acquisition dying it means that we were in "cat" mode. + while shutting down, we need to give time for all buckets to process in flight data*/ + log.Warning("Acquisition is finished, shutting down") + /* + While it might make sense to want to shut-down parser/buckets/etc. as soon as acquisition is finished, + we might have some pending buckets: buckets that overflowed, but whose LeakRoutine are still alive because they + are waiting to be able to "commit" (push to api). This can happen specifically in a context where a lot of logs + are going to trigger overflow (ie. trigger buckets with ~100% of the logs triggering an overflow). + + To avoid this (which would mean that we would "lose" some overflows), let's monitor the number of live buckets. + However, because of the blackhole mechanism, we can't really wait for the number of LeakRoutine to go to zero + (we might have to wait $blackhole_duration). + + So: we are waiting for the number of buckets to stop decreasing before returning. "how long" we should wait + is a bit of the trick question, as some operations (ie. 
reverse dns or such in post-overflow) can take some time :) + */ + + return + + case <-crowdsecTomb.Dying(): + log.Infof("Crowdsec engine shutting down") + return + } + } +} diff --git a/cmd/crowdsec/event_log_hook_windows.go b/cmd/crowdsec/event_log_hook_windows.go new file mode 100644 index 0000000..bacc005 --- /dev/null +++ b/cmd/crowdsec/event_log_hook_windows.go @@ -0,0 +1,39 @@ +package main + +import ( + log "github.com/sirupsen/logrus" + "golang.org/x/sys/windows/svc/eventlog" +) + +type EventLogHook struct { + LogLevels []log.Level + evtlog *eventlog.Log +} + +func (e *EventLogHook) Fire(entry *log.Entry) error { + line, err := entry.String() + if err != nil { + return err + } + switch entry.Level { + case log.PanicLevel: + return e.evtlog.Error(300, line) + case log.FatalLevel: + return e.evtlog.Error(301, line) + case log.ErrorLevel: + return e.evtlog.Error(302, line) + case log.WarnLevel: + return e.evtlog.Warning(303, line) + case log.InfoLevel: + return e.evtlog.Info(304, line) + case log.DebugLevel: + return e.evtlog.Info(305, line) + case log.TraceLevel: + return e.evtlog.Info(306, line) + } + return nil +} + +func (e *EventLogHook) Levels() []log.Level { + return e.LogLevels +} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go new file mode 100644 index 0000000..43eb63e --- /dev/null +++ b/cmd/crowdsec/main.go @@ -0,0 +1,328 @@ +package main + +import ( + "flag" + "fmt" + _ "net/http/pprof" + "os" + "runtime" + "sort" + "strings" + "time" + + "github.com/confluentinc/bincover" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var ( + /*tombs for the parser, buckets and outputs.*/ + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + apiTomb tomb.Tomb + crowdsecTomb tomb.Tomb + pluginTomb tomb.Tomb + + flags *Flags + + /*the state of acquisition*/ + dataSources []acquisition.DataSource + /*the state of the buckets*/ + holders []leakybucket.BucketFactory + buckets *leakybucket.Buckets + outputEventChan chan types.Event //the buckets init returns its own chan that is used for multiplexing + /*settings*/ + lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. 
it is used to GC buckets when we dump them.*/ + pluginBroker csplugin.PluginBroker +) + +var bincoverTesting = "" + +type Flags struct { + ConfigFile string + TraceLevel bool + DebugLevel bool + InfoLevel bool + WarnLevel bool + PrintVersion bool + SingleFileType string + Labels map[string]string + OneShotDSN string + TestMode bool + DisableAgent bool + DisableAPI bool + WinSvc string + DisableCAPI bool +} + +type labelsMap map[string]string + +// Return new parsers +// nodes and povfwnodes are already initialized in parser.LoadStages +func newParsers() *parser.Parsers { + parsers := &parser.Parsers{ + Ctx: &parser.UnixParserCtx{}, + Povfwctx: &parser.UnixParserCtx{}, + StageFiles: make([]parser.Stagefile, 0), + PovfwStageFiles: make([]parser.Stagefile, 0), + } + for _, itemType := range []string{cwhub.PARSERS, cwhub.PARSERS_OVFLW} { + for _, hubParserItem := range cwhub.GetItemMap(itemType) { + if hubParserItem.Installed { + stagefile := parser.Stagefile{ + Filename: hubParserItem.LocalPath, + Stage: hubParserItem.Stage, + } + if itemType == cwhub.PARSERS { + parsers.StageFiles = append(parsers.StageFiles, stagefile) + } + if itemType == cwhub.PARSERS_OVFLW { + parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) + } + } + } + } + if parsers.StageFiles != nil { + sort.Slice(parsers.StageFiles, func(i, j int) bool { + return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename + }) + } + if parsers.PovfwStageFiles != nil { + sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool { + return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename + }) + } + + return parsers +} + +func LoadBuckets(cConfig *csconfig.Config) error { + + var ( + err error + files []string + ) + for _, hubScenarioItem := range cwhub.GetItemMap(cwhub.SCENARIOS) { + if hubScenarioItem.Installed { + files = append(files, hubScenarioItem.LocalPath) + } + } + buckets = leakybucket.NewBuckets() + + log.Infof("Loading %d scenario files", len(files)) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, files, &bucketsTomb, buckets) + + if err != nil { + return fmt.Errorf("scenario loading failed: %v", err) + } + + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { + for holderIndex := range holders { + holders[holderIndex].Profiling = true + } + } + return nil +} + +func LoadAcquisition(cConfig *csconfig.Config) error { + var err error + + if flags.SingleFileType != "" || flags.OneShotDSN != "" { + if flags.OneShotDSN == "" || flags.SingleFileType == "" { + return fmt.Errorf("-type requires a -dsn argument") + } + flags.Labels = labels + flags.Labels["type"] = flags.SingleFileType + + dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.Labels) + if err != nil { + return errors.Wrapf(err, "failed to configure datasource for %s", flags.OneShotDSN) + } + } else { + dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) + if err != nil { + return err + } + } + + return nil +} + +var dumpFolder string +var dumpStates bool +var labels = make(labelsMap) + +func (l *labelsMap) String() string { + return "labels" +} + +func (l labelsMap) Set(label string) error { + split := strings.Split(label, ":") + if len(split) != 2 { + return errors.Wrapf(errors.New("Bad Format"), "for Label '%s'", label) + } + l[split[0]] = split[1] + return nil +} + +func (f *Flags) Parse() { + + flag.StringVar(&f.ConfigFile, "c", csconfig.DefaultConfigPath("config.yaml"), "configuration file") + flag.BoolVar(&f.TraceLevel, "trace", 
false, "VERY verbose") + flag.BoolVar(&f.DebugLevel, "debug", false, "print debug-level on stderr") + flag.BoolVar(&f.InfoLevel, "info", false, "print info-level on stderr") + flag.BoolVar(&f.WarnLevel, "warning", false, "print warning-level on stderr") + flag.BoolVar(&f.PrintVersion, "version", false, "display version") + flag.StringVar(&f.OneShotDSN, "dsn", "", "Process a single data source in time-machine") + flag.StringVar(&f.SingleFileType, "type", "", "Labels.type for file in time-machine") + flag.Var(&labels, "label", "Additional Labels for file in time-machine") + flag.BoolVar(&f.TestMode, "t", false, "only test configs") + flag.BoolVar(&f.DisableAgent, "no-cs", false, "disable crowdsec agent") + flag.BoolVar(&f.DisableAPI, "no-api", false, "disable local API") + flag.BoolVar(&f.DisableCAPI, "no-capi", false, "disable communication with Central API") + flag.StringVar(&f.WinSvc, "winsvc", "", "Windows service Action : Install, Remove etc..") + flag.StringVar(&dumpFolder, "dump-data", "", "dump parsers/buckets raw outputs") + flag.Parse() +} + +// LoadConfig returns a configuration parsed from configuration file +func LoadConfig(cConfig *csconfig.Config) error { + if dumpFolder != "" { + parser.ParseDump = true + parser.DumpFolder = dumpFolder + leakybucket.BucketPourTrack = true + dumpStates = true + } + + if !flags.DisableAgent { + if err := cConfig.LoadCrowdsec(); err != nil { + return err + } + } + + if !flags.DisableAPI { + if err := cConfig.LoadAPIServer(); err != nil { + return err + } + } + + if !cConfig.DisableAgent && (cConfig.API == nil || cConfig.API.Client == nil || cConfig.API.Client.Credentials == nil) { + return errors.New("missing local API credentials for crowdsec agent, abort") + } + + if cConfig.DisableAPI && cConfig.DisableAgent { + return errors.New("You must run at least the API Server or crowdsec") + } + + if flags.WarnLevel { + logLevel := log.WarnLevel + cConfig.Common.LogLevel = &logLevel + } + if flags.InfoLevel || cConfig.Common.LogLevel == nil { + logLevel := log.InfoLevel + cConfig.Common.LogLevel = &logLevel + } + if flags.DebugLevel { + logLevel := log.DebugLevel + cConfig.Common.LogLevel = &logLevel + } + if flags.TraceLevel { + logLevel := log.TraceLevel + cConfig.Common.LogLevel = &logLevel + } + + if flags.TestMode && !cConfig.DisableAgent { + cConfig.Crowdsec.LintOnly = true + } + + if flags.SingleFileType != "" && flags.OneShotDSN != "" { + if cConfig.API != nil && cConfig.API.Server != nil { + cConfig.API.Server.OnlineClient = nil + } + /*if the api is disabled as well, just read file and exit, don't daemonize*/ + if flags.DisableAPI { + cConfig.Common.Daemonize = false + } + cConfig.Common.LogMedia = "stdout" + log.Infof("single file mode : log_media=%s daemonize=%t", cConfig.Common.LogMedia, cConfig.Common.Daemonize) + } + + if cConfig.Common.PidDir != "" { + log.Warn("Deprecation warning: the pid_dir config can be safely removed and is not required") + } + + if cConfig.Common.Daemonize && runtime.GOOS == "windows" { + log.Debug("Daemonization is not supported on Windows, disabling") + cConfig.Common.Daemonize = false + } + + return nil +} + +// exitWithCode must be called right before the program termination, +// to allow measuring functional test coverage in case of abnormal exit. 
+// +// without bincover: log error and exit with code +// with bincover: log error and tell bincover the exit code, then return +func exitWithCode(exitCode int, err error) { + if err != nil { + // this method of logging a fatal error does not + // trigger a program exit (as stated by the authors, it + // is not going to change in logrus to keep backward + // compatibility), and allows us to report coverage. + log.NewEntry(log.StandardLogger()).Log(log.FatalLevel, err) + } + if bincoverTesting == "" { + os.Exit(exitCode) + } + bincover.ExitCode = exitCode +} + +// crowdsecT0 can be used to measure start time of services, +// or uptime of the application +var crowdsecT0 time.Time + +func main() { + crowdsecT0 = time.Now() + + defer types.CatchPanic("crowdsec/main") + + log.Debugf("os.Args: %v", os.Args) + + // Handle command line arguments + flags = &Flags{} + flags.Parse() + + if len(flag.Args()) > 0 { + fmt.Fprintf(os.Stderr, "argument provided but not defined: %s\n", flag.Args()[0]) + flag.Usage() + // the flag package exits with 2 in case of unknown flag + exitWithCode(2, nil) + return + } + + if flags.PrintVersion { + cwversion.Show() + exitWithCode(0, nil) + return + } + + exitCode := 0 + err := StartRunSvc() + if err != nil { + exitCode = 1 + } + exitWithCode(exitCode, err) +} diff --git a/cmd/crowdsec/main_test.go b/cmd/crowdsec/main_test.go new file mode 100644 index 0000000..da7241e --- /dev/null +++ b/cmd/crowdsec/main_test.go @@ -0,0 +1,13 @@ +//go:build testrunmain + +package main + +import ( + "github.com/confluentinc/bincover" + + "testing" +) + +func TestBincoverRunMain(t *testing.T) { + bincover.RunTest(main) +} diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go new file mode 100644 index 0000000..e019158 --- /dev/null +++ b/cmd/crowdsec/metrics.go @@ -0,0 +1,190 @@ +package main + +import ( + "fmt" + "time" + + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "net/http" + + log "github.com/sirupsen/logrus" +) + +/*prometheus*/ +var globalParserHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_parser_hits_total", + Help: "Total events entered the parser.", + }, + []string{"source", "type"}, +) +var globalParserHitsOk = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_parser_hits_ok_total", + Help: "Total events were successfully parsed.", + }, + []string{"source", "type"}, +) +var globalParserHitsKo = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_parser_hits_ko_total", + Help: "Total events were unsuccessfully parsed.", + }, + []string{"source", "type"}, +) + +var globalBucketPourKo = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "cs_bucket_pour_ko_total", + Help: "Total events were not poured in a bucket.", + }, +) + +var globalBucketPourOk = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "cs_bucket_pour_ok_total", + Help: "Total events were poured in at least one bucket.", + }, +) + +var globalCsInfo = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "cs_info", + Help: "Information about Crowdsec.", + ConstLabels: prometheus.Labels{"version": 
cwversion.VersionStr()}, + }, +) + +var globalActiveDecisions = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cs_active_decisions", + Help: "Number of active decisions.", + }, + []string{"reason", "origin", "action"}, +) + +var globalAlerts = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cs_alerts", + Help: "Number of alerts (excluding CAPI).", + }, + []string{"reason"}, +) + +var globalParsingHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Help: "Time spent parsing a line", + Name: "cs_parsing_time_seconds", + Buckets: []float64{0.0005, 0.001, 0.0015, 0.002, 0.0025, 0.003, 0.004, 0.005, 0.0075, 0.01}, + }, + []string{"type", "source"}, +) + +var globalPourHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "cs_bucket_pour_seconds", + Help: "Time spent pouring an event to buckets.", + Buckets: []float64{0.001, 0.002, 0.005, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05}, + }, + []string{"type", "source"}, +) + +func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if dbClient == nil { + next.ServeHTTP(w, r) + return + } + + decisionsFilters := make(map[string][]string, 0) + decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters) + if err != nil { + log.Errorf("Error querying decisions for metrics: %v", err) + next.ServeHTTP(w, r) + return + } + globalActiveDecisions.Reset() + for _, d := range decisions { + globalActiveDecisions.With(prometheus.Labels{"reason": d.Scenario, "origin": d.Origin, "action": d.Type}).Set(float64(d.Count)) + } + + globalAlerts.Reset() + + alertsFilter := map[string][]string{ + "include_capi": {"false"}, + } + + alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) + + if err != nil { + log.Errorf("Error querying alerts for metrics: %v", err) + next.ServeHTTP(w, r) + return + } + + for k, v := range alerts { + globalAlerts.With(prometheus.Labels{"reason": k}).Set(float64(v)) + } + + next.ServeHTTP(w, r) + }) +} + +func registerPrometheus(config *csconfig.PrometheusCfg) { + if !config.Enabled { + return + } + if config.ListenAddr == "" { + log.Warning("prometheus is enabled, but the listen address is empty, using '127.0.0.1'") + config.ListenAddr = "127.0.0.1" + } + if config.ListenPort == 0 { + log.Warning("prometheus is enabled, but the listen port is empty, using '6060'") + config.ListenPort = 6060 + } + + // Registering prometheus + // If in aggregated mode, do not register events associated with a source, to keep the cardinality low + if config.Level == "aggregated" { + log.Infof("Loading aggregated prometheus collectors") + prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, + globalCsInfo, globalParsingHistogram, globalPourHistogram, + leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, + v1.LapiRouteHits, + leaky.BucketsCurrentCount) + } else { + log.Infof("Loading prometheus collectors") + prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, + parser.NodesHits, parser.NodesHitsOk, parser.NodesHitsKo, + globalCsInfo, globalParsingHistogram, globalPourHistogram, + v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime, + leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount, + globalActiveDecisions, 
globalAlerts) + + } +} + +func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, apiReady chan bool, agentReady chan bool) { + if !config.Enabled { + return + } + + defer types.CatchPanic("crowdsec/servePrometheus") + + http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient)) + <-apiReady + <-agentReady + log.Debugf("serving metrics after %s", time.Since(crowdsecT0)) + if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { + log.Warningf("prometheus: %s", err) + } +} diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go new file mode 100644 index 0000000..52bc04e --- /dev/null +++ b/cmd/crowdsec/output.go @@ -0,0 +1,174 @@ +package main + +import ( + "context" + "fmt" + "net/url" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { + + var dedupCache []*models.Alert + + for idx, alert := range alerts { + log.Tracef("alert %d/%d", idx, len(alerts)) + /*if we have more than one source, we need to dedup */ + if len(alert.Sources) == 0 || len(alert.Sources) == 1 { + dedupCache = append(dedupCache, alert.Alert) + continue + } + for k, src := range alert.Sources { + src := src // shadow the range variable: otherwise every deduped alert would point at the same (last) source + refsrc := *alert.Alert //copy + log.Tracef("source[%s]", k) + refsrc.Source = &src + dedupCache = append(dedupCache, &refsrc) + } + } + if len(dedupCache) != len(alerts) { + log.Tracef("went from %d to %d alerts", len(alerts), len(dedupCache)) + } + return dedupCache, nil +} + +func PushAlerts(alerts []types.RuntimeAlert, client *apiclient.ApiClient) error { + ctx := context.Background() + alertsToPush, err := dedupAlerts(alerts) + + if err != nil { + return errors.Wrap(err, "failed to transform alerts for api") + } + _, _, err = client.Alerts.Add(ctx, alertsToPush) + if err != nil { + return errors.Wrap(err, "failed sending alert to LAPI") + } + return nil +} + +var bucketOverflows []types.Event + +func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, + postOverflowCTX parser.UnixParserCtx, postOverflowNodes []parser.Node, apiConfig csconfig.ApiCredentialsCfg) error { + + var err error + ticker := time.NewTicker(1 * time.Second) + + var cache []types.RuntimeAlert + var cacheMutex sync.Mutex + + scenarios, err := cwhub.GetInstalledScenariosAsString() + if err != nil { + return errors.Wrapf(err, "loading list of installed hub scenarios: %s", err) + } + + apiURL, err := url.Parse(apiConfig.URL) + if err != nil { + return errors.Wrapf(err, "parsing api url ('%s'): %s", apiConfig.URL, err) + } + + password := strfmt.Password(apiConfig.Password) + + Client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: apiConfig.Login, + Password: password, + Scenarios: scenarios, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + UpdateScenario: cwhub.GetInstalledScenariosAsString, + }) + if err != nil { + return errors.Wrapf(err, "new client api: %s", err) + } + if _, err =
Client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + MachineID: &apiConfig.Login, + Password: &password, + Scenarios: scenarios, + }); err != nil { + return errors.Wrapf(err, "authenticate watcher (%s)", apiConfig.Login) + } + //start the heartbeat service + log.Debugf("Starting HeartBeat service") + Client.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb) +LOOP: + for { + select { + case <-ticker.C: + if len(cache) > 0 { + cacheMutex.Lock() + cachecopy := cache + newcache := make([]types.RuntimeAlert, 0) + cache = newcache + cacheMutex.Unlock() + if err := PushAlerts(cachecopy, Client); err != nil { + log.Errorf("while pushing to api : %s", err) + //just push back the events to the queue + cacheMutex.Lock() + cache = append(cache, cachecopy...) + cacheMutex.Unlock() + } + } + case <-outputsTomb.Dying(): + if len(cache) > 0 { + cacheMutex.Lock() + cachecopy := cache + cacheMutex.Unlock() + if err := PushAlerts(cachecopy, Client); err != nil { + log.Errorf("while pushing leftovers to api : %s", err) + } + } + break LOOP + case event := <-overflow: + //if the Alert is nil, it's to signal bucket is ready for GC, don't track this + if dumpStates && event.Overflow.Alert != nil { + if bucketOverflows == nil { + bucketOverflows = make([]types.Event, 0) + } + bucketOverflows = append(bucketOverflows, event) + } + /*if alert is empty and mapKey is present, the overflow is just to cleanup bucket*/ + if event.Overflow.Alert == nil && event.Overflow.Mapkey != "" { + buckets.Bucket_map.Delete(event.Overflow.Mapkey) + break + } + /* process post overflow parser nodes */ + event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes) + if err != nil { + return fmt.Errorf("postoverflow failed : %s", err) + } + log.Printf("%s", *event.Overflow.Alert.Message) + if event.Overflow.Whitelisted { + log.Printf("[%s] is whitelisted, skip.", *event.Overflow.Alert.Message) + continue + } + if event.Overflow.Reprocess { + log.Debugf("Overflow being reprocessed.") + input <- event + } + if dumpStates { + continue + } + + cacheMutex.Lock() + cache = append(cache, event.Overflow) + cacheMutex.Unlock() + } + } + + ticker.Stop() + return nil + +} diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go new file mode 100644 index 0000000..aa93b6e --- /dev/null +++ b/cmd/crowdsec/parse.go @@ -0,0 +1,53 @@ +package main + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { + +LOOP: + for { + select { + case <-parsersTomb.Dying(): + log.Infof("Killing parser routines") + break LOOP + case event := <-input: + if !event.Process { + continue + } + if event.Line.Module == "" { + log.Errorf("empty event.Line.Module field, the acquisition module must set it ! 
: %+v", event.Line) + continue + } + globalParserHits.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc() + + startParsing := time.Now() + /* parse the log using magic */ + parsed, err := parser.Parse(parserCTX, event, nodes) + if err != nil { + log.Errorf("failed parsing : %v\n", err) + } + elapsed := time.Since(startParsing) + globalParsingHistogram.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Observe(elapsed.Seconds()) + if !parsed.Process { + globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc() + log.Debugf("Discarding line %+v", parsed) + continue + } + globalParserHitsOk.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Inc() + if parsed.Whitelisted { + log.Debugf("event whitelisted, discard") + continue + } + output <- parsed + } + } + return nil +} diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go new file mode 100644 index 0000000..23bdecb --- /dev/null +++ b/cmd/crowdsec/pour.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" +) + +func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets, cConfig *csconfig.Config) error { + var ( + count int + ) + for { + //bucket is now ready + select { + case <-bucketsTomb.Dying(): + log.Infof("Bucket routine exiting") + return nil + case parsed := <-input: + startTime := time.Now() + count++ + if count%5000 == 0 { + log.Infof("%d existing buckets", leaky.LeakyRoutineCount) + //when in forensics mode, garbage collect buckets + if cConfig.Crowdsec.BucketsGCEnabled { + if parsed.MarshaledTime != "" { + var z *time.Time = &time.Time{} + if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { + log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) + } else { + log.Warning("Starting buckets garbage collection ...") + if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil { + return fmt.Errorf("failed to start bucket GC : %s", err) + } + } + } + } + } + //here we can bucketify with parsed + poured, err := leaky.PourItemToHolders(parsed, holders, buckets) + if err != nil { + log.Errorf("bucketify failed for: %v", parsed) + return fmt.Errorf("process of event failed : %v", err) + } + elapsed := time.Since(startTime) + globalPourHistogram.With(prometheus.Labels{"type": parsed.Line.Module, "source": parsed.Line.Src}).Observe(elapsed.Seconds()) + if poured { + globalBucketPourOk.Inc() + } else { + globalBucketPourKo.Inc() + } + if len(parsed.MarshaledTime) != 0 { + if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { + log.Warningf("failed to unmarshal time from event : %s", err) + } + } + } + } +} diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go new file mode 100644 index 0000000..40df6c2 --- /dev/null +++ b/cmd/crowdsec/run_in_svc.go @@ -0,0 +1,71 @@ +//go:build linux || freebsd || netbsd || openbsd || solaris || !windows +// +build linux freebsd netbsd openbsd solaris !windows + +package main + +import ( + "os" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/types" + 
log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/writer" +) + +func StartRunSvc() error { + var ( + cConfig *csconfig.Config + err error + ) + + // Set a default logger with level=fatal on stderr, + // in addition to the one we configure afterwards + log.AddHook(&writer.Hook{ + Writer: os.Stderr, + LogLevels: []log.Level{ + log.PanicLevel, + log.FatalLevel, + }, + }) + + cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI) + if err != nil { + return err + } + if err := LoadConfig(cConfig); err != nil { + return err + } + // Configure logging + if err = types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, + cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { + log.Fatal(err.Error()) + } + + log.Infof("Crowdsec %s", cwversion.VersionStr()) + + if bincoverTesting != "" { + log.Debug("coverage report is enabled") + } + + apiReady := make(chan bool, 1) + agentReady := make(chan bool, 1) + + // Enable profiling early + if cConfig.Prometheus != nil { + var dbClient *database.Client + var err error + + if cConfig.DbConfig != nil { + dbClient, err = database.NewClient(cConfig.DbConfig) + + if err != nil { + log.Fatalf("unable to create database client: %s", err) + } + } + registerPrometheus(cConfig.Prometheus) + go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + } + return Serve(cConfig, apiReady, agentReady) +} diff --git a/cmd/crowdsec/run_in_svc_windows.go b/cmd/crowdsec/run_in_svc_windows.go new file mode 100644 index 0000000..caa1b96 --- /dev/null +++ b/cmd/crowdsec/run_in_svc_windows.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/windows/svc" +) + +func StartRunSvc() error { + const svcName = "CrowdSec" + const svcDescription = "Crowdsec IPS/IDS" + + isRunninginService, err := svc.IsWindowsService() + if err != nil { + return errors.Wrap(err, "failed to determine if we are running in windows service mode") + } + if isRunninginService { + return runService(svcName) + } + + if flags.WinSvc == "Install" { + err = installService(svcName, svcDescription) + if err != nil { + return errors.Wrapf(err, "failed to %s %s", flags.WinSvc, svcName) + } + } else if flags.WinSvc == "Remove" { + err = removeService(svcName) + if err != nil { + return errors.Wrapf(err, "failed to %s %s", flags.WinSvc, svcName) + } + } else if flags.WinSvc == "Start" { + err = startService(svcName) + if err != nil { + return errors.Wrapf(err, "failed to %s %s", flags.WinSvc, svcName) + } + } else if flags.WinSvc == "Stop" { + err = controlService(svcName, svc.Stop, svc.Stopped) + if err != nil { + return errors.Wrapf(err, "failed to %s %s", flags.WinSvc, svcName) + } + } else if flags.WinSvc == "" { + return WindowsRun() + } else { + return fmt.Errorf("Invalid value for winsvc parameter: %s", flags.WinSvc) + } + return nil +} + +func WindowsRun() error { + var ( + cConfig *csconfig.Config + err error + ) + + cConfig, err = csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI) + if err != nil { + return err + } + if err := LoadConfig(cConfig); err != nil { + return err + } + // Configure 
logging + if err = types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, + cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { + return err + } + + log.Infof("Crowdsec %s", cwversion.VersionStr()) + + if bincoverTesting != "" { + log.Debug("coverage report is enabled") + } + + apiReady := make(chan bool, 1) + agentReady := make(chan bool, 1) + + // Enable profiling early + if cConfig.Prometheus != nil { + var dbClient *database.Client + var err error + + if cConfig.DbConfig != nil { + dbClient, err = database.NewClient(cConfig.DbConfig) + + if err != nil { + log.Fatalf("unable to create database client: %s", err) + } + } + registerPrometheus(cConfig.Prometheus) + go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + } + return Serve(cConfig, apiReady, agentReady) +} diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go new file mode 100644 index 0000000..d309440 --- /dev/null +++ b/cmd/crowdsec/serve.go @@ -0,0 +1,357 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + "time" + + "github.com/coreos/go-systemd/daemon" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +//nolint: deadcode,unused // debugHandler is kept as a dev convenience: it shuts down and serializes internal state +func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { + var ( + tmpFile string + err error + ) + + // stop goroutines + if err = ShutdownCrowdsecRoutines(); err != nil { + log.Warningf("Failed to shut down routines: %s", err) + } + + // todo: properly stop acquis with the tail readers + if tmpFile, err = leaky.DumpBucketsStateAt(time.Now().UTC(), cConfig.Crowdsec.BucketStateDumpDir, buckets); err != nil { + log.Warningf("Failed to dump bucket state : %s", err) + } + + if err := leaky.ShutdownAllBuckets(buckets); err != nil { + log.Warningf("Failed to shut down routines : %s", err) + } + log.Printf("Shutdown is finished, buckets are in %s", tmpFile) + return nil +} + +func reloadHandler(sig os.Signal) (*csconfig.Config, error) { + var tmpFile string + + // re-initialize tombs + acquisTomb = tomb.Tomb{} + parsersTomb = tomb.Tomb{} + bucketsTomb = tomb.Tomb{} + outputsTomb = tomb.Tomb{} + apiTomb = tomb.Tomb{} + crowdsecTomb = tomb.Tomb{} + pluginTomb = tomb.Tomb{} + + cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI) + if err != nil { + return nil, err + } + + if err = LoadConfig(cConfig); err != nil { + return nil, err + } + // Configure logging + if err = types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, + cConfig.Common.LogDir, *cConfig.Common.LogLevel, + cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, + cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, + cConfig.Common.ForceColorLogs); err != nil { + return nil, err + } + + if !cConfig.DisableAPI { + if flags.DisableCAPI { + log.Warningf("Communication with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil + } + apiServer, err := initAPIServer(cConfig) + if err != nil { + return nil, errors.Wrap(err, "unable to init api server") + } + + apiReady := make(chan bool, 1) +
serveAPIServer(apiServer, apiReady) + } + + if !cConfig.DisableAgent { + csParsers, err := initCrowdsec(cConfig) + if err != nil { + return nil, errors.Wrap(err, "unable to init crowdsec") + } + + // restore bucket state + if tmpFile != "" { + log.Warningf("we are now using %s as a state file", tmpFile) + cConfig.Crowdsec.BucketStateFile = tmpFile + } + + // reload the simulation state + if err := cConfig.LoadSimulation(); err != nil { + log.Errorf("reload error (simulation) : %s", err) + } + + agentReady := make(chan bool, 1) + serveCrowdsec(csParsers, cConfig, agentReady) + } + + log.Printf("Reload is finished") + // delete the tmp file, it's safe now :) + if tmpFile != "" { + if err := os.Remove(tmpFile); err != nil { + log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err) + } + } + return cConfig, nil +} + +func ShutdownCrowdsecRoutines() error { + var reterr error + + log.Debugf("Shutting down crowdsec sub-routines") + if len(dataSources) > 0 { + acquisTomb.Kill(nil) + log.Debugf("waiting for acquisition to finish") + + if err := acquisTomb.Wait(); err != nil { + log.Warningf("Acquisition returned error : %s", err) + reterr = err + } + } + + log.Debugf("acquisition is finished, wait for parser/bucket/outputs.") + parsersTomb.Kill(nil) + if err := parsersTomb.Wait(); err != nil { + log.Warningf("Parsers returned error : %s", err) + reterr = err + } + + log.Debugf("parsers is done") + time.Sleep(1 * time.Second) // ugly workaround for now to ensure PourItemtoholders are finished + bucketsTomb.Kill(nil) + + if err := bucketsTomb.Wait(); err != nil { + log.Warningf("Buckets returned error : %s", err) + reterr = err + } + + log.Debugf("buckets is done") + time.Sleep(1 * time.Second) // ugly workaround for now + outputsTomb.Kill(nil) + + if err := outputsTomb.Wait(); err != nil { + log.Warningf("Outputs returned error : %s", err) + reterr = err + } + + log.Debugf("outputs are done") + // He's dead, Jim.
+ crowdsecTomb.Kill(nil) + + return reterr +} + +func shutdownAPI() error { + log.Debugf("shutting down api via Tomb") + apiTomb.Kill(nil) + + if err := apiTomb.Wait(); err != nil { + return err + } + + log.Debugf("done") + return nil +} + +func shutdownCrowdsec() error { + log.Debugf("shutting down crowdsec via Tomb") + crowdsecTomb.Kill(nil) + + if err := crowdsecTomb.Wait(); err != nil { + return err + } + + log.Debugf("done") + return nil +} + +func shutdown(sig os.Signal, cConfig *csconfig.Config) error { + if !cConfig.DisableAgent { + if err := shutdownCrowdsec(); err != nil { + return errors.Wrap(err, "failed to shut down crowdsec") + } + } + + if !cConfig.DisableAPI { + if err := shutdownAPI(); err != nil { + return errors.Wrap(err, "failed to shut down api routines") + } + } + + return nil +} + +func HandleSignals(cConfig *csconfig.Config) error { + var ( + newConfig *csconfig.Config + err error + ) + + signalChan := make(chan os.Signal, 1) + + // We add os.Interrupt mostly to ease windows development, + // it allows to simulate a clean shutdown when running in the console + signal.Notify(signalChan, + syscall.SIGHUP, + syscall.SIGTERM, + os.Interrupt) + + exitChan := make(chan error) + + go func() { + defer types.CatchPanic("crowdsec/HandleSignals") + Loop: + for { + s := <-signalChan + switch s { + // kill -SIGHUP XXXX + case syscall.SIGHUP: + log.Warning("SIGHUP received, reloading") + + if err = shutdown(s, cConfig); err != nil { + exitChan <- errors.Wrap(err, "failed shutdown") + + break Loop + } + + if newConfig, err = reloadHandler(s); err != nil { + exitChan <- errors.Wrap(err, "reload handler failure") + + break Loop + } + + if newConfig != nil { + cConfig = newConfig + } + // ctrl+C, kill -SIGINT XXXX, kill -SIGTERM XXXX + case os.Interrupt, syscall.SIGTERM: + log.Warning("SIGTERM received, shutting down") + if err = shutdown(s, cConfig); err != nil { + exitChan <- errors.Wrap(err, "failed shutdown") + + break Loop + } + exitChan <- nil + } + } + }() + + err = <-exitChan + if err == nil { + log.Warning("Crowdsec service shutting down") + } + return err +} + +func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) error { + acquisTomb = tomb.Tomb{} + parsersTomb = tomb.Tomb{} + bucketsTomb = tomb.Tomb{} + outputsTomb = tomb.Tomb{} + apiTomb = tomb.Tomb{} + crowdsecTomb = tomb.Tomb{} + pluginTomb = tomb.Tomb{} + + if cConfig.API.Server != nil && cConfig.API.Server.DbConfig != nil { + dbClient, err := database.NewClient(cConfig.API.Server.DbConfig) + if err != nil { + return errors.Wrap(err, "failed to get database client") + } + + err = exprhelpers.Init(dbClient) + if err != nil { + return errors.Wrap(err, "failed to init expr helpers") + } + } else { + err := exprhelpers.Init(nil) + if err != nil { + return errors.Wrap(err, "failed to init expr helpers") + } + + log.Warningln("Exprhelpers loaded without database client.") + } + + if !cConfig.DisableAPI { + if cConfig.API.Server.OnlineClient == nil || cConfig.API.Server.OnlineClient.Credentials == nil { + log.Warningf("Communication with CrowdSec Central API disabled from configuration file") + } + + if flags.DisableCAPI { + log.Warningf("Communication with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil + } + + apiServer, err := initAPIServer(cConfig) + if err != nil { + return errors.Wrap(err, "api server init") + } + + if !flags.TestMode { + serveAPIServer(apiServer, apiReady) + } + } else { + apiReady <- true + } + + if !cConfig.DisableAgent { + csParsers, err := 
initCrowdsec(cConfig) + if err != nil { + return errors.Wrap(err, "crowdsec init") + } + + // if it's just linting, we're done + if !flags.TestMode { + serveCrowdsec(csParsers, cConfig, agentReady) + } + } else { + agentReady <- true + } + + if flags.TestMode { + log.Infof("test done") + pluginBroker.Kill() + os.Exit(0) + } + + if cConfig.Common != nil && cConfig.Common.Daemonize { + sent, err := daemon.SdNotify(false, daemon.SdNotifyReady) + if !sent || err != nil { + log.Errorf("Failed to notify(sent: %v): %v", sent, err) + } + + // wait for signals + return HandleSignals(cConfig) + } + + for { + select { + case <-apiTomb.Dead(): + log.Infof("api shutdown") + return nil + case <-crowdsecTomb.Dead(): + log.Infof("crowdsec shutdown") + return nil + } + } +} diff --git a/cmd/crowdsec/win_service.go b/cmd/crowdsec/win_service.go new file mode 100644 index 0000000..94dfbf4 --- /dev/null +++ b/cmd/crowdsec/win_service.go @@ -0,0 +1,125 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package main + +import ( + "syscall" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/eventlog" +) + +type crowdsec_winservice struct { + config *csconfig.Config +} + +func (m *crowdsec_winservice) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) { + const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown + changes <- svc.Status{State: svc.StartPending} + tick := time.Tick(500 * time.Millisecond) + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} + + go func() { + loop: + for { + select { + case <-tick: + + case c := <-r: + switch c.Cmd { + case svc.Interrogate: + changes <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + changes <- svc.Status{State: svc.StopPending} + err := shutdown(nil, m.config) + if err != nil { + log.Errorf("Error while shutting down: %s", err) + //don't return, we still want to notify windows that we are stopped ? + } + break loop + default: + log.Errorf("unexpected control request #%d", c) + } + } + } + }() + + err := WindowsRun() + changes <- svc.Status{State: svc.Stopped} + if err != nil { + log.Fatalf(err.Error()) + } + return +} + +func runService(name string) error { + + //All the calls to logging before the logger is configured are pretty much useless, but we keep them for clarity + err := eventlog.InstallAsEventCreate("CrowdSec", eventlog.Error|eventlog.Warning|eventlog.Info) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == windows.ERROR_ACCESS_DENIED { + log.Warnf("Access denied when installing event source, running as non-admin ?") + } else { + log.Warnf("Failed to install event log: %s (%d)", err, errno) + } + } else { + log.Warnf("Failed to install event log: %s", err) + } + } + + //Let's use our source even if we could not install it: + // - It could have been created earlier + // - No permission to create it (e.g. running as non-admin when working on crowdsec) + //It will still work, windows will just display some additional errors in the event log + evtlog, err := eventlog.Open("CrowdSec") + + if err == nil { + //Send panic and fatal to event log, as they can happen before the logger is configured. 
+ log.AddHook(&EventLogHook{ + LogLevels: []log.Level{ + log.PanicLevel, + log.FatalLevel, + }, + evtlog: evtlog, + }) + } else { + log.Warnf("Failed to open event log: %s", err) + } + + cConfig, err := csconfig.NewConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI) + if err != nil { + return err + } + + if err := LoadConfig(cConfig); err != nil { + return err + } + + // Configure logging + if err := types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, + cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { + return err + } + + log.Infof("starting %s service", name) + winsvc := crowdsec_winservice{config: cConfig} + + if err := svc.Run(name, &winsvc); err != nil { + return errors.Wrapf(err, "%s service failed", name) + } + + log.Infof("%s service stopped", name) + return nil +} diff --git a/cmd/crowdsec/win_service_install.go b/cmd/crowdsec/win_service_install.go new file mode 100644 index 0000000..b497bc9 --- /dev/null +++ b/cmd/crowdsec/win_service_install.go @@ -0,0 +1,95 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/windows/svc/mgr" +) + +func exePath() (string, error) { + var err error + prog := os.Args[0] + p, err := filepath.Abs(prog) + if err != nil { + return "", err + } + fi, err := os.Stat(p) + if err == nil { + if !fi.Mode().IsDir() { + return p, nil + } + err = fmt.Errorf("%s is a directory", p) + } + if filepath.Ext(p) == "" { + var fi os.FileInfo + + p += ".exe" + fi, err = os.Stat(p) + if err == nil { + if !fi.Mode().IsDir() { + return p, nil + } + err = fmt.Errorf("%s is a directory", p) + } + } + return "", err +} + +func installService(name, desc string) error { + exepath, err := exePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err == nil { + s.Close() + return fmt.Errorf("service %s already exists", name) + } + s, err = m.CreateService(name, exepath, mgr.Config{DisplayName: desc}, "is", "auto-started") + if err != nil { + return err + } + defer s.Close() + /*err = eventlog.InstallAsEventCreate(name, eventlog.Error|eventlog.Warning|eventlog.Info) + if err != nil { + s.Delete() + return fmt.Errorf("SetupEventLogSource() failed: %s", err) + }*/ + return nil +} + +func removeService(name string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("service %s is not installed", name) + } + defer s.Close() + err = s.Delete() + if err != nil { + return err + } + /*err = eventlog.Remove(name) + if err != nil { + return fmt.Errorf("RemoveEventLogSource() failed: %s", err) + }*/ + return nil +} diff --git a/cmd/crowdsec/win_service_manage.go b/cmd/crowdsec/win_service_manage.go new file mode 100644 index 0000000..4aa1152 --- /dev/null +++ b/cmd/crowdsec/win_service_manage.go @@ -0,0 +1,64 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
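Taken together, win_service_install.go (above) and win_service_manage.go (below) cover the full service lifecycle: install/remove via the SCM, start via s.Start, and stop via a control request that is polled until the target state is reached. A hypothetical caller, shown only to illustrate how these helpers compose — the real dispatch is the -winsvc switch in run_in_svc_windows.go, and this sketch assumes it is compiled alongside those helpers in package main:

    //go:build windows

    package main

    import (
    	"log"

    	"golang.org/x/sys/windows/svc"
    )

    func exerciseServiceHelpers() {
    	const name = "CrowdSec"

    	if err := installService(name, "Crowdsec IPS/IDS"); err != nil {
    		log.Fatalf("install: %v", err)
    	}
    	if err := startService(name); err != nil {
    		log.Fatalf("start: %v", err)
    	}
    	// controlService sends the Stop command and polls the SCM until it
    	// reports the Stopped state (or gives up after its 10s timeout).
    	if err := controlService(name, svc.Stop, svc.Stopped); err != nil {
    		log.Fatalf("stop: %v", err)
    	}
    	if err := removeService(name); err != nil {
    		log.Fatalf("remove: %v", err)
    	}
    }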
+ +// +build windows + +package main + +import ( + "fmt" + "time" + + //"time" + + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/mgr" +) + +func startService(name string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("could not access service: %v", err) + } + defer s.Close() + err = s.Start("is", "manual-started") + if err != nil { + return fmt.Errorf("could not start service: %v", err) + } + return nil +} + +func controlService(name string, c svc.Cmd, to svc.State) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + s, err := m.OpenService(name) + if err != nil { + return fmt.Errorf("could not access service: %v", err) + } + defer s.Close() + status, err := s.Control(c) + if err != nil { + return fmt.Errorf("could not send control=%d: %v", c, err) + } + timeout := time.Now().Add(10 * time.Second) + for status.State != to { + if timeout.Before(time.Now()) { + return fmt.Errorf("timeout waiting for service to go to state=%d", to) + } + time.Sleep(300 * time.Millisecond) + status, err = s.Query() + if err != nil { + return fmt.Errorf("could not retrieve service status: %v", err) + } + } + return nil +} diff --git a/config/acquis.yaml b/config/acquis.yaml new file mode 100644 index 0000000..cc3631f --- /dev/null +++ b/config/acquis.yaml @@ -0,0 +1,16 @@ +filenames: + - /var/log/nginx/*.log + - ./tests/nginx/nginx.log +#this is not a syslog log, indicate which kind of logs it is +labels: + type: nginx +--- +filenames: + - /var/log/auth.log + - /var/log/syslog +labels: + type: syslog +--- +filename: /var/log/apache2/*.log +labels: + type: apache2 diff --git a/config/acquis_win.yaml b/config/acquis_win.yaml new file mode 100644 index 0000000..a22dc26 --- /dev/null +++ b/config/acquis_win.yaml @@ -0,0 +1,8 @@ +source: wineventlog +event_channel: Security +event_ids: + - 4625 + - 4623 +event_level: information +labels: + type: eventlog \ No newline at end of file diff --git a/config/config.yaml b/config/config.yaml new file mode 100644 index 0000000..47a6cff --- /dev/null +++ b/config/config.yaml @@ -0,0 +1,63 @@ +common: + daemonize: true + pid_dir: /var/run/ + log_media: file + log_level: info + log_dir: /var/log/ + log_max_size: 20 + compress_logs: true + log_max_files: 10 + working_dir: . 
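The `common:` block above, like the sections that follow, is plain YAML that gopkg.in/yaml.v2 (already a dependency of this tree) maps onto Go structs. A rough sketch of how such a block can be read — the struct here mirrors only a few fields for illustration and is not the real csconfig type:

    package main

    import (
    	"fmt"
    	"os"

    	"gopkg.in/yaml.v2"
    )

    // commonCfg mirrors an illustrative subset of the `common:` block above.
    type commonCfg struct {
    	Daemonize bool   `yaml:"daemonize"`
    	LogMedia  string `yaml:"log_media"`
    	LogLevel  string `yaml:"log_level"`
    	LogDir    string `yaml:"log_dir"`
    }

    func main() {
    	raw, err := os.ReadFile("/etc/crowdsec/config.yaml")
    	if err != nil {
    		panic(err)
    	}
    	var cfg struct {
    		Common commonCfg `yaml:"common"`
    	}
    	if err := yaml.Unmarshal(raw, &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Printf("daemonize=%v log_dir=%s\n", cfg.Common.Daemonize, cfg.Common.LogDir)
    }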
+config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json + notification_dir: /etc/crowdsec/notifications/ + plugin_dir: /usr/local/lib/crowdsec/plugins/ +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + acquisition_dir: /etc/crowdsec/acquis.d + parser_routines: 1 +cscli: + output: human + color: auto +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + #max_open_conns: 100 + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +plugin_config: + user: nobody # plugin process would be run on behalf of this user + group: nogroup # plugin process would be run on behalf of this group +api: + client: + insecure_skip_verify: false + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + log_level: info + listen_uri: 127.0.0.1:8080 + profiles_path: /etc/crowdsec/profiles.yaml + console_path: /etc/crowdsec/console.yaml + online_client: # Central API credentials (to push signals and receive bad IPs) + credentials_path: /etc/crowdsec/online_api_credentials.yaml + trusted_ips: # IP ranges, or IPs which can have admin API access + - 127.0.0.1 + - ::1 +# tls: +# cert_file: /etc/crowdsec/ssl/cert.pem +# key_file: /etc/crowdsec/ssl/key.pem +prometheus: + enabled: true + level: full + listen_addr: 127.0.0.1 + listen_port: 6060 diff --git a/config/config_win.yaml b/config/config_win.yaml new file mode 100644 index 0000000..021f983 --- /dev/null +++ b/config/config_win.yaml @@ -0,0 +1,49 @@ +common: + daemonize: false + log_media: file + log_level: info + log_dir: C:\ProgramData\CrowdSec\log\ + working_dir: . +config_paths: + config_dir: C:\ProgramData\CrowdSec\config\ + data_dir: C:\ProgramData\CrowdSec\data\ + simulation_path: C:\ProgramData\CrowdSec\config\simulation.yaml + hub_dir: C:\ProgramData\CrowdSec\hub\ + index_path: C:\ProgramData\CrowdSec\hub\.index.json + plugin_dir: C:\ProgramData\CrowdSec\plugins\ + notification_dir: C:\ProgramData\CrowdSec\config\notifications\ +crowdsec_service: + acquisition_path: C:\ProgramData\CrowdSec\config\acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + log_level: info + type: sqlite + db_path: C:\ProgramData\CrowdSec\data\crowdsec.db + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +api: + client: + insecure_skip_verify: false + credentials_path: C:\ProgramData\CrowdSec\config\local_api_credentials.yaml + server: + log_level: info + listen_uri: 127.0.0.1:8080 + profiles_path: C:\ProgramData\Crowdsec\config\profiles.yaml + online_client: # Crowdsec API credentials (to push signals and receive bad IPs) + credentials_path: C:\ProgramData\CrowdSec\config\online_api_credentials.yaml +# tls: +# cert_file: /etc/crowdsec/ssl/cert.pem +# key_file: /etc/crowdsec/ssl/key.pem +prometheus: + enabled: true + level: full + listen_addr: 127.0.0.1 + listen_port: 6060 diff --git a/config/config_win_no_lapi.yaml b/config/config_win_no_lapi.yaml new file mode 100644 index 0000000..35c7f2c --- /dev/null +++ b/config/config_win_no_lapi.yaml @@ -0,0 +1,28 @@ +common: + daemonize: true + log_media: file + log_level: info + log_dir: C:\ProgramData\CrowdSec\log\ + working_dir: .
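Every variant of this config enables the same `prometheus:` block on 127.0.0.1:6060, matching the defaults registerPrometheus() falls back to when the fields are empty. A quick way to check that the endpoint answers once crowdsec is running (a sketch; host and port are taken from the configs above):

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// /metrics is served by promhttp (see servePrometheus in cmd/crowdsec/metrics.go).
    	resp, err := http.Get("http://127.0.0.1:6060/metrics")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	body, err := io.ReadAll(resp.Body)
    	if err != nil {
    		panic(err)
    	}
    	// Expect exposition-format lines such as cs_info or cs_parser_hits_total.
    	fmt.Printf("got %d bytes of metrics\n", len(body))
    }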
+config_paths: + config_dir: C:\ProgramData\CrowdSec\config\ + data_dir: C:\ProgramData\CrowdSec\data\ + simulation_path: C:\ProgramData\CrowdSec\config\simulation.yaml + hub_dir: C:\ProgramData\CrowdSec\hub\ + index_path: C:\ProgramData\CrowdSec\hub\.index.json + plugin_dir: C:\ProgramData\CrowdSec\plugins\ + notification_dir: C:\ProgramData\CrowdSec\config\notifications\ +crowdsec_service: + acquisition_path: C:\ProgramData\CrowdSec\config\acquis.yaml + parser_routines: 1 +cscli: + output: human +api: + client: + insecure_skip_verify: false + credentials_path: C:\ProgramData\CrowdSec\config\local_api_credentials.yaml +prometheus: + enabled: true + level: full + listen_addr: 127.0.0.1 + listen_port: 6060 diff --git a/config/console.yaml b/config/console.yaml new file mode 100644 index 0000000..e83658d --- /dev/null +++ b/config/console.yaml @@ -0,0 +1,3 @@ +share_manual_decisions: false +share_custom: true +share_tainted: true diff --git a/config/crowdsec.cron.daily b/config/crowdsec.cron.daily new file mode 100644 index 0000000..1c110df --- /dev/null +++ b/config/crowdsec.cron.daily @@ -0,0 +1,14 @@ +#!/bin/sh + +test -x /usr/bin/cscli || exit 0 + +/usr/bin/cscli --error hub update + +upgraded=$(/usr/bin/cscli --error hub upgrade) +if [ -n "$upgraded" ]; then + # splay initial metrics push + sleep $(seq 1 90 | shuf -n 1) + systemctl reload crowdsec +fi + +exit 0 diff --git a/config/crowdsec.service b/config/crowdsec.service new file mode 100644 index 0000000..4b6c26f --- /dev/null +++ b/config/crowdsec.service @@ -0,0 +1,14 @@ +[Unit] +Description=Crowdsec agent +After=syslog.target network.target remote-fs.target nss-lookup.target + +[Service] +Type=notify +Environment=LC_ALL=C LANG=C +ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t +ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml +#ExecStartPost=/bin/sleep 0.1 +ExecReload=/bin/kill -HUP $MAINPID + +[Install] +WantedBy=multi-user.target diff --git a/config/dev.yaml b/config/dev.yaml new file mode 100644 index 0000000..a45c4fa --- /dev/null +++ b/config/dev.yaml @@ -0,0 +1,48 @@ +common: + daemonize: true + pid_dir: /tmp/ + log_media: stdout + log_level: info + working_dir: . 
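+# The db_config block further down mixes sqlite defaults with MySQL-style
+# fields; a sketch of switching this dev setup to MySQL, reusing the
+# credentials and host placeholders already present in this file:
+#
+# db_config:
+#   type: mysql
+#   user: root
+#   password: crowdsec
+#   db_name: crowdsec
+#   host: "172.17.0.2"
+#   port: 3306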
+config_paths:
+  config_dir: ./config
+  data_dir: ./data/
+  notification_dir: ./config/notifications/
+  plugin_dir: ./plugins/
+  #simulation_path: /etc/crowdsec/config/simulation.yaml
+  #hub_dir: /etc/crowdsec/hub/
+  #index_path: ./config/hub/.index.json
+crowdsec_service:
+  acquisition_path: ./config/acquis.yaml
+  parser_routines: 1
+plugin_config:
+  user: $USER # the plugin process will be run on behalf of this user
+  group: $USER # the plugin process will be run on behalf of this group
+cscli:
+  output: human
+db_config:
+  type: sqlite
+  db_path: ./data/crowdsec.db
+  user: root
+  password: crowdsec
+  db_name: crowdsec
+  host: "172.17.0.2"
+  port: 3306
+  flush:
+    #max_items: 10000
+    #max_age: 168h
+api:
+  client:
+    credentials_path: ./config/local_api_credentials.yaml
+  server:
+    #insecure_skip_verify: true
+    listen_uri: 127.0.0.1:8081
+    profiles_path: ./config/profiles.yaml
+    tls:
+      #cert_file: ./cert.pem
+      #key_file: ./key.pem
+    online_client: # Central API
+      credentials_path: ./config/online_api_credentials.yaml
+prometheus:
+  enabled: true
+  level: full
diff --git a/config/local_api_credentials.yaml b/config/local_api_credentials.yaml
new file mode 100644
index 0000000..10a09ff
--- /dev/null
+++ b/config/local_api_credentials.yaml
@@ -0,0 +1 @@
+url: http://127.0.0.1:8080
\ No newline at end of file
diff --git a/config/online_api_credentials.yaml b/config/online_api_credentials.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/config/patterns/aws b/config/patterns/aws
new file mode 100644
index 0000000..5816ce1
--- /dev/null
+++ b/config/patterns/aws
@@ -0,0 +1,11 @@
+S3_REQUEST_LINE (?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})
+
+S3_ACCESS_LOG %{WORD:owner} %{NOTSPACE:bucket} \[%{HTTPDATE:timestamp}\] %{IP:clientip} %{NOTSPACE:requester} %{NOTSPACE:request_id} %{NOTSPACE:operation} %{NOTSPACE:key} (?:"%{S3_REQUEST_LINE}"|-) (?:%{INT:response:int}|-) (?:-|%{NOTSPACE:error_code}) (?:%{INT:bytes:int}|-) (?:%{INT:object_size:int}|-) (?:%{INT:request_time_ms:int}|-) (?:%{INT:turnaround_time_ms:int}|-) (?:%{QS:referrer}|-) (?:"?%{QS:agent}"?|-) (?:-|%{NOTSPACE:version_id})
+
+ELB_URIPATHPARAM %{URIPATH:path}(?:%{URIPARAM:params})?
+
+ELB_URI %{URIPROTO:proto}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST:urihost})?(?:%{ELB_URIPATHPARAM})?
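+# A synthetic S3 access log line the S3_ACCESS_LOG pattern above is meant to
+# match (bucket, ARN and request id are made up for illustration):
+# 79a59df9 mybucket [06/Feb/2019:00:00:38 +0000] 192.0.2.3 arn:aws:iam::123456789012:user/test 3E57427F3 REST.GET.VERSIONING mykey "GET /mybucket?versioning HTTP/1.1" 200 - 113 - 7 - "-" "S3Console/0.4" -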
+ +ELB_REQUEST_LINE (?:%{WORD:verb} %{ELB_URI:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest}) + +ELB_ACCESS_LOG %{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb} %{IP:clientip}:%{INT:clientport:int} (?:(%{IP:backendip}:?:%{INT:backendport:int})|-) %{NUMBER:request_processing_time:float} %{NUMBER:backend_processing_time:float} %{NUMBER:response_processing_time:float} %{INT:response:int} %{INT:backend_response:int} %{INT:received_bytes:int} %{INT:bytes:int} "%{ELB_REQUEST_LINE}" \ No newline at end of file diff --git a/config/patterns/bacula b/config/patterns/bacula new file mode 100644 index 0000000..96ff0e0 --- /dev/null +++ b/config/patterns/bacula @@ -0,0 +1,50 @@ +BACULA_TIMESTAMP %{MONTHDAY}-%{MONTH} %{HOUR}:%{MINUTE} +BACULA_HOST [a-zA-Z0-9-]+ +BACULA_VOLUME %{USER} +BACULA_DEVICE %{USER} +BACULA_DEVICEPATH %{UNIXPATH} +BACULA_CAPACITY %{INT}{1,3}(,%{INT}{3})* +BACULA_VERSION %{USER} +BACULA_JOB %{USER} + +BACULA_LOG_MAX_CAPACITY User defined maximum volume capacity %{BACULA_CAPACITY} exceeded on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) +BACULA_LOG_END_VOLUME End of medium on Volume \"%{BACULA_VOLUME:volume}\" Bytes=%{BACULA_CAPACITY} Blocks=%{BACULA_CAPACITY} at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. +BACULA_LOG_NEW_VOLUME Created new Volume \"%{BACULA_VOLUME:volume}\" in catalog. +BACULA_LOG_NEW_LABEL Labeled new Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\). +BACULA_LOG_WROTE_LABEL Wrote label to prelabeled Volume \"%{BACULA_VOLUME:volume}\" on device \"%{BACULA_DEVICE}\" \(%{BACULA_DEVICEPATH}\) +BACULA_LOG_NEW_MOUNT New volume \"%{BACULA_VOLUME:volume}\" mounted on device \"%{BACULA_DEVICE:device}\" \(%{BACULA_DEVICEPATH}\) at %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}. +BACULA_LOG_NOOPEN \s+Cannot open %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOOPENDIR \s+Could not open directory %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOSTAT \s+Could not stat %{DATA}: ERR=%{GREEDYDATA:berror} +BACULA_LOG_NOJOBS There are no more Jobs associated with Volume \"%{BACULA_VOLUME:volume}\". Marking it purged. +BACULA_LOG_ALL_RECORDS_PRUNED All records pruned from Volume \"%{BACULA_VOLUME:volume}\"; marking it \"Purged\" +BACULA_LOG_BEGIN_PRUNE_JOBS Begin pruning Jobs older than %{INT} month %{INT} days . +BACULA_LOG_BEGIN_PRUNE_FILES Begin pruning Files. +BACULA_LOG_PRUNED_JOBS Pruned %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +BACULA_LOG_PRUNED_FILES Pruned Files from %{INT} Jobs* for client %{BACULA_HOST:client} from catalog. +BACULA_LOG_ENDPRUNE End auto prune. +BACULA_LOG_STARTJOB Start Backup JobId %{INT}, Job=%{BACULA_JOB:job} +BACULA_LOG_STARTRESTORE Start Restore Job %{BACULA_JOB:job} +BACULA_LOG_USEDEVICE Using Device \"%{BACULA_DEVICE:device}\" +BACULA_LOG_DIFF_FS \s+%{UNIXPATH} is a different filesystem. Will not descend from %{UNIXPATH} into it. +BACULA_LOG_JOBEND Job write elapsed time = %{DATA:elapsed}, Transfer rate = %{NUMBER} (K|M|G)? Bytes/second +BACULA_LOG_NOPRUNE_JOBS No Jobs found to prune. +BACULA_LOG_NOPRUNE_FILES No Files found to prune. +BACULA_LOG_VOLUME_PREVWRITTEN Volume \"%{BACULA_VOLUME:volume}\" previously written, moving to end of data. +BACULA_LOG_READYAPPEND Ready to append to end of Volume \"%{BACULA_VOLUME:volume}\" size=%{INT} +BACULA_LOG_CANCELLING Cancelling duplicate JobId=%{INT}. +BACULA_LOG_MARKCANCEL JobId %{INT}, Job %{BACULA_JOB:job} marked to be canceled. 
+BACULA_LOG_CLIENT_RBJ shell command: run ClientRunBeforeJob \"%{GREEDYDATA:runjob}\" +BACULA_LOG_VSS (Generate )?VSS (Writer)? +BACULA_LOG_MAXSTART Fatal error: Job canceled because max start delay time exceeded. +BACULA_LOG_DUPLICATE Fatal error: JobId %{INT:duplicate} already running. Duplicate job not allowed. +BACULA_LOG_NOJOBSTAT Fatal error: No Job status returned from FD. +BACULA_LOG_FATAL_CONN Fatal error: bsock.c:133 Unable to connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +BACULA_LOG_NO_CONNECT Warning: bsock.c:127 Could not connect to (Client: %{BACULA_HOST:client}|Storage daemon) on %{HOSTNAME}:%{POSINT}. ERR=%{GREEDYDATA:berror} +BACULA_LOG_NO_AUTH Fatal error: Unable to authenticate with File daemon at %{HOSTNAME}. Possible causes: +BACULA_LOG_NOSUIT No prior or suitable Full backup found in catalog. Doing FULL backup. +BACULA_LOG_NOPRIOR No prior Full backup Job record found. + +BACULA_LOG_JOB (Error: )?Bacula %{BACULA_HOST} %{BACULA_VERSION} \(%{BACULA_VERSION}\): + +BACULA_LOGLINE %{BACULA_TIMESTAMP:bts} %{BACULA_HOST:hostname} JobId %{INT:jobid}: (%{BACULA_LOG_MAX_CAPACITY}|%{BACULA_LOG_END_VOLUME}|%{BACULA_LOG_NEW_VOLUME}|%{BACULA_LOG_NEW_LABEL}|%{BACULA_LOG_WROTE_LABEL}|%{BACULA_LOG_NEW_MOUNT}|%{BACULA_LOG_NOOPEN}|%{BACULA_LOG_NOOPENDIR}|%{BACULA_LOG_NOSTAT}|%{BACULA_LOG_NOJOBS}|%{BACULA_LOG_ALL_RECORDS_PRUNED}|%{BACULA_LOG_BEGIN_PRUNE_JOBS}|%{BACULA_LOG_BEGIN_PRUNE_FILES}|%{BACULA_LOG_PRUNED_JOBS}|%{BACULA_LOG_PRUNED_FILES}|%{BACULA_LOG_ENDPRUNE}|%{BACULA_LOG_STARTJOB}|%{BACULA_LOG_STARTRESTORE}|%{BACULA_LOG_USEDEVICE}|%{BACULA_LOG_DIFF_FS}|%{BACULA_LOG_JOBEND}|%{BACULA_LOG_NOPRUNE_JOBS}|%{BACULA_LOG_NOPRUNE_FILES}|%{BACULA_LOG_VOLUME_PREVWRITTEN}|%{BACULA_LOG_READYAPPEND}|%{BACULA_LOG_CANCELLING}|%{BACULA_LOG_MARKCANCEL}|%{BACULA_LOG_CLIENT_RBJ}|%{BACULA_LOG_VSS}|%{BACULA_LOG_MAXSTART}|%{BACULA_LOG_DUPLICATE}|%{BACULA_LOG_NOJOBSTAT}|%{BACULA_LOG_FATAL_CONN}|%{BACULA_LOG_NO_CONNECT}|%{BACULA_LOG_NO_AUTH}|%{BACULA_LOG_NOSUIT}|%{BACULA_LOG_JOB}|%{BACULA_LOG_NOPRIOR}) \ No newline at end of file diff --git a/config/patterns/bro b/config/patterns/bro new file mode 100644 index 0000000..e8d3749 --- /dev/null +++ b/config/patterns/bro @@ -0,0 +1,13 @@ +# https://www.bro.org/sphinx/script-reference/log-files.html + +# http.log +BRO_HTTP %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{INT:trans_depth}\t%{GREEDYDATA:method}\t%{GREEDYDATA:domain}\t%{GREEDYDATA:uri}\t%{GREEDYDATA:referrer}\t%{GREEDYDATA:user_agent}\t%{NUMBER:request_body_len}\t%{NUMBER:response_body_len}\t%{GREEDYDATA:status_code}\t%{GREEDYDATA:status_msg}\t%{GREEDYDATA:info_code}\t%{GREEDYDATA:info_msg}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:bro_tags}\t%{GREEDYDATA:username}\t%{GREEDYDATA:password}\t%{GREEDYDATA:proxied}\t%{GREEDYDATA:orig_fuids}\t%{GREEDYDATA:orig_mime_types}\t%{GREEDYDATA:resp_fuids}\t%{GREEDYDATA:resp_mime_types} + +# dns.log +BRO_DNS %{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{INT:trans_id}\t%{GREEDYDATA:query}\t%{GREEDYDATA:qclass}\t%{GREEDYDATA:qclass_name}\t%{GREEDYDATA:qtype}\t%{GREEDYDATA:qtype_name}\t%{GREEDYDATA:rcode}\t%{GREEDYDATA:rcode_name}\t%{GREEDYDATA:AA}\t%{GREEDYDATA:TC}\t%{GREEDYDATA:RD}\t%{GREEDYDATA:RA}\t%{GREEDYDATA:Z}\t%{GREEDYDATA:answers}\t%{GREEDYDATA:TTLs}\t%{GREEDYDATA:rejected} + +# conn.log +BRO_CONN 
%{NUMBER:ts}\t%{NOTSPACE:uid}\t%{IP:orig_h}\t%{INT:orig_p}\t%{IP:resp_h}\t%{INT:resp_p}\t%{WORD:proto}\t%{GREEDYDATA:service}\t%{NUMBER:duration}\t%{NUMBER:orig_bytes}\t%{NUMBER:resp_bytes}\t%{GREEDYDATA:conn_state}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:missed_bytes}\t%{GREEDYDATA:history}\t%{GREEDYDATA:orig_pkts}\t%{GREEDYDATA:orig_ip_bytes}\t%{GREEDYDATA:resp_pkts}\t%{GREEDYDATA:resp_ip_bytes}\t%{GREEDYDATA:tunnel_parents} + +# files.log +BRO_FILES %{NUMBER:ts}\t%{NOTSPACE:fuid}\t%{IP:tx_hosts}\t%{IP:rx_hosts}\t%{NOTSPACE:conn_uids}\t%{GREEDYDATA:source}\t%{GREEDYDATA:depth}\t%{GREEDYDATA:analyzers}\t%{GREEDYDATA:mime_type}\t%{GREEDYDATA:filename}\t%{GREEDYDATA:duration}\t%{GREEDYDATA:local_orig}\t%{GREEDYDATA:is_orig}\t%{GREEDYDATA:seen_bytes}\t%{GREEDYDATA:total_bytes}\t%{GREEDYDATA:missing_bytes}\t%{GREEDYDATA:overflow_bytes}\t%{GREEDYDATA:timedout}\t%{GREEDYDATA:parent_fuid}\t%{GREEDYDATA:md5}\t%{GREEDYDATA:sha1}\t%{GREEDYDATA:sha256}\t%{GREEDYDATA:extracted} \ No newline at end of file diff --git a/config/patterns/cowrie_honeypot b/config/patterns/cowrie_honeypot new file mode 100644 index 0000000..eda0c9e --- /dev/null +++ b/config/patterns/cowrie_honeypot @@ -0,0 +1 @@ +COWRIE_NEW_CO New connection: %{IPV4:source_ip}:[0-9]+ \(%{IPV4:dest_ip}:%{INT:dest_port}\) \[session: %{DATA:telnet_session}\]$ \ No newline at end of file diff --git a/config/patterns/exim b/config/patterns/exim new file mode 100644 index 0000000..f135561 --- /dev/null +++ b/config/patterns/exim @@ -0,0 +1,12 @@ +EXIM_MSGID [0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2} +EXIM_FLAGS (<=|[-=>*]>|[*]{2}|==) +EXIM_DATE %{YEAR:exim_year}-%{MONTHNUM:exim_month}-%{MONTHDAY:exim_day} %{TIME:exim_time} +EXIM_PID \[%{POSINT}\] +EXIM_QT ((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?) +EXIM_EXCLUDE_TERMS (Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message) +EXIM_REMOTE_HOST (H=(%{NOTSPACE:remote_hostname} )?(\(%{NOTSPACE:remote_heloname}\) )?\[%{IP:remote_host}\]) +EXIM_INTERFACE (I=\[%{IP:exim_interface}\](:%{NUMBER:exim_interface_port})) +EXIM_PROTOCOL (P=%{NOTSPACE:protocol}) +EXIM_MSG_SIZE (S=%{NUMBER:exim_msg_size}) +EXIM_HEADER_ID (id=%{NOTSPACE:exim_header_id}) +EXIM_SUBJECT (T=%{QS:exim_subject}) \ No newline at end of file diff --git a/config/patterns/firewalls b/config/patterns/firewalls new file mode 100644 index 0000000..fafa7ec --- /dev/null +++ b/config/patterns/firewalls @@ -0,0 +1,86 @@ +# NetScreen firewall logs +NETSCREENSESSIONLOG %{SYSLOGTIMESTAMP:date} %{IPORHOST:device} %{IPORHOST}: NetScreen device_id=%{WORD:device_id}%{DATA}: start_time=%{QUOTEDSTRING:start_time} duration=%{INT:duration} policy_id=%{INT:policy_id} service=%{DATA:service} proto=%{INT:proto} src zone=%{WORD:src_zone} dst zone=%{WORD:dst_zone} action=%{WORD:action} sent=%{INT:sent} rcvd=%{INT:rcvd} src=%{IPORHOST:src_ip} dst=%{IPORHOST:dst_ip} src_port=%{INT:src_port} dst_port=%{INT:dst_port} src-xlated ip=%{IPORHOST:src_xlated_ip} port=%{INT:src_xlated_port} dst-xlated ip=%{IPORHOST:dst_xlated_ip} port=%{INT:dst_xlated_port} session_id=%{INT:session_id} reason=%{GREEDYDATA:reason} + +#== Cisco ASA == +CISCOTAG [A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+) +CISCOTIMESTAMP %{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME} +CISCO_TAGGED_SYSLOG ^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})? 
?: %%{CISCOTAG:ciscotag}: +# Common Particles +CISCO_ACTION Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted +CISCO_REASON Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)* +CISCO_DIRECTION Inbound|inbound|Outbound|outbound +CISCO_INTERVAL first hit|%{INT}-second interval +CISCO_XLATE_TYPE static|dynamic +# ASA-1-104001 +CISCOFW104001 \((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:switch_reason} +# ASA-1-104002 +CISCOFW104002 \((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:switch_reason} +# ASA-1-104003 +CISCOFW104003 \((?:Primary|Secondary)\) Switching to FAILED\. +# ASA-1-104004 +CISCOFW104004 \((?:Primary|Secondary)\) Switching to OK\. +# ASA-1-105003 +CISCOFW105003 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} waiting +# ASA-1-105004 +CISCOFW105004 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} normal +# ASA-1-105005 +CISCOFW105005 \((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{GREEDYDATA:interface_name} +# ASA-1-105008 +CISCOFW105008 \((?:Primary|Secondary)\) Testing [Ii]nterface %{GREEDYDATA:interface_name} +# ASA-1-105009 +CISCOFW105009 \((?:Primary|Secondary)\) Testing on [Ii]nterface %{GREEDYDATA:interface_name} (?:Passed|Failed) +# ASA-2-106001 +CISCOFW106001 %{CISCO_DIRECTION:direction} %{WORD:protocol} connection %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{GREEDYDATA:tcp_flags} on interface %{GREEDYDATA:interface} +# ASA-2-106006, ASA-2-106007, ASA-2-106010 +CISCOFW106006_106007_106010 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} (?:from|src) %{IP:src_ip}/%{INT:src_port}(\(%{DATA:src_fwuser}\))? (?:to|dst) %{IP:dst_ip}/%{INT:dst_port}(\(%{DATA:dst_fwuser}\))? (?:on interface %{DATA:interface}|due to %{CISCO_REASON:reason}) +# ASA-3-106014 +CISCOFW106014 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} src %{DATA:src_interface}:%{IP:src_ip}(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{IP:dst_ip}(\(%{DATA:dst_fwuser}\))? \(type %{INT:icmp_type}, code %{INT:icmp_code}\) +# ASA-6-106015 +CISCOFW106015 %{CISCO_ACTION:action} %{WORD:protocol} \(%{DATA:policy_id}\) from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{DATA:tcp_flags} on interface %{GREEDYDATA:interface} +# ASA-1-106021 +CISCOFW106021 %{CISCO_ACTION:action} %{WORD:protocol} reverse path check from %{IP:src_ip} to %{IP:dst_ip} on interface %{GREEDYDATA:interface} +# ASA-4-106023 +CISCOFW106023 %{CISCO_ACTION:action}( protocol)? %{WORD:protocol} src %{DATA:src_interface}:%{DATA:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{DATA:dst_ip}(/%{INT:dst_port})?(\(%{DATA:dst_fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? 
\[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-4-106100, ASA-4-106102, ASA-4-106103 +CISCOFW106100_2_3 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} for user '%{DATA:src_fwuser}' %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\) -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\) hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-5-106100 +CISCOFW106100 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\)(\(%{DATA:src_fwuser}\))? -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\)(\(%{DATA:src_fwuser}\))? hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\] +# ASA-6-110002 +CISCOFW110002 %{CISCO_REASON:reason} for %{WORD:protocol} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +# ASA-6-302010 +CISCOFW302010 %{INT:connection_count} in use, %{INT:connection_count_max} most used +# ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016 +CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))? +# ASA-6-302020, ASA-6-302021 +CISCOFW302020_302021 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection for faddr %{IP:dst_ip}/%{INT:icmp_seq_num}(?:\(%{DATA:fwuser}\))? gaddr %{IP:src_xlated_ip}/%{INT:icmp_code_xlated} laddr %{IP:src_ip}/%{INT:icmp_code}( \(%{DATA:user}\))? +# ASA-6-305011 +CISCOFW305011 %{CISCO_ACTION:action} %{CISCO_XLATE_TYPE:xlate_type} %{WORD:protocol} translation from %{DATA:src_interface}:%{IP:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? to %{DATA:src_xlated_interface}:%{IP:src_xlated_ip}/%{DATA:src_xlated_port} +# ASA-3-313001, ASA-3-313004, ASA-3-313008 +CISCOFW313001_313004_313008 %{CISCO_ACTION:action} %{WORD:protocol} type=%{INT:icmp_type}, code=%{INT:icmp_code} from %{IP:src_ip} on interface %{DATA:interface}( to %{IP:dst_ip})? +# ASA-4-313005 +CISCOFW313005 %{CISCO_REASON:reason} for %{WORD:protocol} error message: %{WORD:err_protocol} src %{DATA:err_src_interface}:%{IP:err_src_ip}(\(%{DATA:err_src_fwuser}\))? dst %{DATA:err_dst_interface}:%{IP:err_dst_ip}(\(%{DATA:err_dst_fwuser}\))? \(type %{INT:err_icmp_type}, code %{INT:err_icmp_code}\) on %{DATA:interface} interface\. Original IP payload: %{WORD:protocol} src %{IP:orig_src_ip}/%{INT:orig_src_port}(\(%{DATA:orig_src_fwuser}\))? dst %{IP:orig_dst_ip}/%{INT:orig_dst_port}(\(%{DATA:orig_dst_fwuser}\))? 
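+# A synthetic line the CISCOFW106023 pattern above is meant to match
+# (interfaces, addresses and ACL name are made up for illustration):
+# Deny tcp src outside:192.0.2.1/53875 dst inside:198.51.100.2/22 by access-group "OUTSIDE_IN" [0x0, 0x0]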
+# ASA-5-321001 +CISCOFW321001 Resource '%{WORD:resource_name}' limit of %{POSINT:resource_limit} reached for system +# ASA-4-402117 +CISCOFW402117 %{WORD:protocol}: Received a non-IPSec packet \(protocol= %{WORD:orig_protocol}\) from %{IP:src_ip} to %{IP:dst_ip} +# ASA-4-402119 +CISCOFW402119 %{WORD:protocol}: Received an %{WORD:orig_protocol} packet \(SPI= %{DATA:spi}, sequence number= %{DATA:seq_num}\) from %{IP:src_ip} \(user= %{DATA:user}\) to %{IP:dst_ip} that failed anti-replay checking +# ASA-4-419001 +CISCOFW419001 %{CISCO_ACTION:action} %{WORD:protocol} packet from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}, reason: %{GREEDYDATA:reason} +# ASA-4-419002 +CISCOFW419002 %{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} with different initial sequence number +# ASA-4-500004 +CISCOFW500004 %{CISCO_REASON:reason} for protocol=%{WORD:protocol}, from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} +# ASA-6-602303, ASA-6-602304 +CISCOFW602303_602304 %{WORD:protocol}: An %{CISCO_DIRECTION:direction} %{GREEDYDATA:tunnel_type} SA \(SPI= %{DATA:spi}\) between %{IP:src_ip} and %{IP:dst_ip} \(user= %{DATA:user}\) has been %{CISCO_ACTION:action} +# ASA-7-710001, ASA-7-710002, ASA-7-710003, ASA-7-710005, ASA-7-710006 +CISCOFW710001_710002_710003_710005_710006 %{WORD:protocol} (?:request|access) %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} +# ASA-6-713172 +CISCOFW713172 Group = %{GREEDYDATA:group}, IP = %{IP:src_ip}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:is_remote_natted}\s*behind a NAT device\s+This\s+end\s*%{DATA:is_local_natted}\s*behind a NAT device +# ASA-4-733100 +CISCOFW733100 \[\s*%{DATA:drop_type}\s*\] drop %{DATA:drop_rate_id} exceeded. 
Current burst rate is %{INT:drop_rate_current_burst} per second, max configured rate is %{INT:drop_rate_max_burst}; Current average rate is %{INT:drop_rate_current_avg} per second, max configured rate is %{INT:drop_rate_max_avg}; Cumulative total count is %{INT:drop_total_count} +#== End Cisco ASA == + +# Shorewall firewall logs +SHOREWALL (%{SYSLOGTIMESTAMP:timestamp}) (%{WORD:nf_host}) kernel:.*Shorewall:(%{WORD:nf_action1})?:(%{WORD:nf_action2})?.*IN=(%{USERNAME:nf_in_interface})?.*(OUT= *MAC=(%{COMMONMAC:nf_dst_mac}):(%{COMMONMAC:nf_src_mac})?|OUT=%{USERNAME:nf_out_interface}).*SRC=(%{IPV4:nf_src_ip}).*DST=(%{IPV4:nf_dst_ip}).*LEN=(%{WORD:nf_len}).*?TOS=(%{WORD:nf_tos}).*?PREC=(%{WORD:nf_prec}).*?TTL=(%{INT:nf_ttl}).*?ID=(%{INT:nf_id}).*?PROTO=(%{WORD:nf_protocol}).*?SPT=(%{INT:nf_src_port}?.*DPT=%{INT:nf_dst_port}?.*) +#== End Shorewall \ No newline at end of file diff --git a/config/patterns/haproxy b/config/patterns/haproxy new file mode 100644 index 0000000..c71bc31 --- /dev/null +++ b/config/patterns/haproxy @@ -0,0 +1,39 @@ +## These patterns were tested w/ haproxy-1.4.15 + +## Documentation of the haproxy log formats can be found at the following links: +## http://code.google.com/p/haproxy-docs/wiki/HTTPLogFormat +## http://code.google.com/p/haproxy-docs/wiki/TCPLogFormat + +HAPROXYTIME %{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second}) +HAPROXYDATE %{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:%{HAPROXYTIME:haproxy_time}.%{INT:haproxy_milliseconds} + +# Override these default patterns to parse out what is captured in your haproxy.cfg +HAPROXYCAPTUREDREQUESTHEADERS %{DATA:captured_request_headers} +HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:captured_response_headers} + +# Example: +# These haproxy config lines will add data to the logs that are captured +# by the patterns below. Place them in your custom patterns directory to +# override the defaults. 
+# +# capture request header Host len 40 +# capture request header X-Forwarded-For len 50 +# capture request header Accept-Language len 50 +# capture request header Referer len 200 +# capture request header User-Agent len 200 +# +# capture response header Content-Type len 30 +# capture response header Content-Encoding len 10 +# capture response header Cache-Control len 200 +# capture response header Last-Modified len 200 +# +# HAPROXYCAPTUREDREQUESTHEADERS %{DATA:request_header_host}\|%{DATA:request_header_x_forwarded_for}\|%{DATA:request_header_accept_language}\|%{DATA:request_header_referer}\|%{DATA:request_header_user_agent} +# HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:response_header_content_type}\|%{DATA:response_header_content_encoding}\|%{DATA:response_header_cache_control}\|%{DATA:response_header_last_modified} + +# parse a haproxy 'httplog' line +HAPROXYHTTPBASE %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\{%{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:http_verb} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?" + +HAPROXYHTTP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{HAPROXYHTTPBASE} + +# parse a haproxy 'tcplog' line +HAPROXYTCP (?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \ No newline at end of file diff --git a/config/patterns/java b/config/patterns/java new file mode 100644 index 0000000..d0ad391 --- /dev/null +++ b/config/patterns/java @@ -0,0 +1,20 @@ +JAVACLASS (?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]* +#Space is an allowed character to match special cases like 'Native Method' or 'Unknown Source' +JAVAFILE (?:[A-Za-z0-9_. 
-]+)
+#Allow special method
+JAVAMETHOD (?:()|[a-zA-Z$_][a-zA-Z$_0-9]*)
+#Line number is optional in special cases 'Native method' or 'Unknown source'
+JAVASTACKTRACEPART %{SPACE}at %{JAVACLASS:class}\.%{JAVAMETHOD:method}\(%{JAVAFILE:file}(?::%{NUMBER:line})?\)
+# Java Logs
+JAVATHREAD (?:[A-Z]{2}-Processor[\d]+)
+##JAVACLASS (?:[a-zA-Z0-9-]+\.)+[A-Za-z0-9$]+
+##JAVAFILE (?:[A-Za-z0-9_.-]+)
+##JAVASTACKTRACEPART at %{JAVACLASS:class}\.%{WORD:method}\(%{JAVAFILE:file}:%{NUMBER:line}\)
+JAVALOGMESSAGE (.*)
+# MMM dd, yyyy HH:mm:ss eg: Jan 9, 2014 7:13:13 AM
+CATALINA_DATESTAMP %{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) (?:AM|PM)
+# yyyy-MM-dd HH:mm:ss,SSS ZZZ eg: 2014-01-09 17:32:25,527 -0800
+TOMCAT_DATESTAMP 20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE}
+CATALINALOG %{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage}
+# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something completely unexpected happened...
+TOMCATLOG %{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}
\ No newline at end of file
diff --git a/config/patterns/junos b/config/patterns/junos
new file mode 100644
index 0000000..2da91cc
--- /dev/null
+++ b/config/patterns/junos
@@ -0,0 +1,8 @@
+# JUNOS 11.4 RT_FLOW patterns
+RT_FLOW_EVENT (RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY)
+
+RT_FLOW1 %{RT_FLOW_EVENT:event}: %{GREEDYDATA:close-reason}: %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} \d+\(%{DATA:sent}\) \d+\(%{DATA:received}\) %{INT:elapsed-time} .*
+
+RT_FLOW2 %{RT_FLOW_EVENT:event}: session created %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{IP:nat-src-ip}/%{INT:nat-src-port}->%{IP:nat-dst-ip}/%{INT:nat-dst-port} %{DATA:src-nat-rule-name} %{DATA:dst-nat-rule-name} %{INT:protocol-id} %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} %{INT:session-id} .*
+
+RT_FLOW3 %{RT_FLOW_EVENT:event}: session denied %{IP:src-ip}/%{INT:src-port}->%{IP:dst-ip}/%{INT:dst-port} %{DATA:service} %{INT:protocol-id}\(\d\) %{DATA:policy-name} %{DATA:from-zone} %{DATA:to-zone} .*
diff --git a/config/patterns/linux-syslog b/config/patterns/linux-syslog
new file mode 100644
index 0000000..0911964
--- /dev/null
+++ b/config/patterns/linux-syslog
@@ -0,0 +1,16 @@
+SYSLOG5424PRINTASCII [!-~]+
+
+SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:logsource}+(?: %{SYSLOGPROG}:|)
+SYSLOGPAMSESSION %{SYSLOGBASE} %{GREEDYDATA:message}%{WORD:pam_module}\(%{DATA:pam_caller}\): session %{WORD:pam_session_state} for user %{USERNAME:username}(?: by %{GREEDYDATA:pam_by})?
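+# A synthetic line SYSLOGPAMSESSION above is meant to match (host and user
+# are made up for illustration):
+# Jun 10 12:00:01 myhost sshd[1234]: pam_unix(sshd:session): session opened for user alice by (uid=0)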
+ +CRON_ACTION [A-Z ]+ +CRONLOG %{SYSLOGBASE} \(%{USER:user}\) %{CRON_ACTION:action} \(%{DATA:message}\) + +SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message} + +# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424) +SYSLOG5424PRI <%{NONNEGINT:syslog5424_pri}> +SYSLOG5424SD \[%{DATA}\]+ +SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(-|%{SYSLOG5424PRINTASCII:syslog5424_app}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_proc}) +(-|%{SYSLOG5424PRINTASCII:syslog5424_msgid}) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|) + +SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:syslog5424_msg} \ No newline at end of file diff --git a/config/patterns/mcollective b/config/patterns/mcollective new file mode 100644 index 0000000..0389cc3 --- /dev/null +++ b/config/patterns/mcollective @@ -0,0 +1,4 @@ +# Remember, these can be multi-line events. +MCOLLECTIVE ., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\]%{SPACE}%{LOGLEVEL:event_level} + +MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}: \ No newline at end of file diff --git a/config/patterns/modsecurity b/config/patterns/modsecurity new file mode 100644 index 0000000..0c614dc --- /dev/null +++ b/config/patterns/modsecurity @@ -0,0 +1,18 @@ +APACHEERRORTIME %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} +APACHEERRORPREFIX \[%{APACHEERRORTIME:timestamp}\] \[%{NOTSPACE:apacheseverity}\] (\[pid %{INT}:tid %{INT}\] )?\[client %{IPORHOST:sourcehost}(:%{INT:source_port})?\] (\[client %{IPORHOST}\])? +GENERICAPACHEERROR %{APACHEERRORPREFIX} %{GREEDYDATA:message} +MODSECPREFIX %{APACHEERRORPREFIX} ModSecurity: %{NOTSPACE:modsecseverity}\. %{GREEDYDATA:modsecmessage} +MODSECRULEFILE \[file %{QUOTEDSTRING:rulefile}\] +MODSECRULELINE \[line %{QUOTEDSTRING:ruleline}\] +MODSECMATCHOFFSET \[offset %{QUOTEDSTRING:matchoffset}\] +MODSECRULEID \[id %{QUOTEDSTRING:ruleid}\] +MODSECRULEREV \[rev %{QUOTEDSTRING:rulerev}\] +MODSECRULEMSG \[msg %{QUOTEDSTRING:rulemessage}\] +MODSECRULEDATA \[data %{QUOTEDSTRING:ruledata}\] +MODSECRULESEVERITY \[severity ["']%{WORD:ruleseverity}["']\] +MODSECRULEVERS \[ver "[^"]+"\] +MODSECRULETAGS (?:\[tag %{QUOTEDSTRING:ruletag0}\] )?(?:\[tag %{QUOTEDSTRING:ruletag1}\] )?(?:\[tag %{QUOTEDSTRING:ruletag2}\] )?(?:\[tag %{QUOTEDSTRING:ruletag3}\] )?(?:\[tag %{QUOTEDSTRING:ruletag4}\] )?(?:\[tag %{QUOTEDSTRING:ruletag5}\] )?(?:\[tag %{QUOTEDSTRING:ruletag6}\] )?(?:\[tag %{QUOTEDSTRING:ruletag7}\] )?(?:\[tag %{QUOTEDSTRING:ruletag8}\] )?(?:\[tag %{QUOTEDSTRING:ruletag9}\] )?(?:\[tag %{QUOTEDSTRING}\] )* +MODSECHOSTNAME \[hostname ['"]%{DATA:targethost}["']\] +MODSECURI \[uri ["']%{DATA:targeturi}["']\] +MODSECUID \[unique_id %{QUOTEDSTRING:uniqueid}\] +MODSECAPACHEERROR %{MODSECPREFIX} %{MODSECRULEFILE} %{MODSECRULELINE} (?:%{MODSECMATCHOFFSET} )?(?:%{MODSECRULEID} )?(?:%{MODSECRULEREV} )?(?:%{MODSECRULEMSG} )?(?:%{MODSECRULEDATA} )?(?:%{MODSECRULESEVERITY} )?(?:%{MODSECRULEVERS} )?%{MODSECRULETAGS}%{MODSECHOSTNAME} %{MODSECURI} %{MODSECUID} \ No newline at end of file diff --git a/config/patterns/mongodb b/config/patterns/mongodb new file mode 100644 index 0000000..126a2a5 --- /dev/null +++ b/config/patterns/mongodb @@ -0,0 +1,7 @@ +MONGO_LOG %{SYSLOGTIMESTAMP:timestamp} \[%{WORD:component}\] %{GREEDYDATA:message} +MONGO_QUERY \{ \{ .* \} ntoreturn: \} +MONGO_WORDDASH \b[\w-]+\b +MONGO_SLOWQUERY %{WORD} %{MONGO_WORDDASH:database}\.%{MONGO_WORDDASH:collection} %{WORD}: %{MONGO_QUERY:query} %{WORD}:%{NONNEGINT:ntoreturn} %{WORD}:%{NONNEGINT:ntoskip} 
%{WORD}:%{NONNEGINT:nscanned}.*nreturned:%{NONNEGINT:nreturned}..+ %{POSINT:duration}ms
+MONGO3_SEVERITY \w
+MONGO3_COMPONENT %{WORD}|-
+MONGO3_LOG %{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:severity} %{MONGO3_COMPONENT:component}%{SPACE}(?:\[%{DATA:context}\])? %{GREEDYDATA:message}
\ No newline at end of file
diff --git a/config/patterns/mysql b/config/patterns/mysql
new file mode 100644
index 0000000..141a0c0
--- /dev/null
+++ b/config/patterns/mysql
@@ -0,0 +1 @@
+MYSQL_AUTH_FAIL %{TIMESTAMP_ISO8601:time} %{NUMBER} \[Note\] Access denied for user '%{DATA:user}'@'%{IP:source_ip}' \(using password: %{WORD:using_password}\)
diff --git a/config/patterns/nagios b/config/patterns/nagios
new file mode 100644
index 0000000..5dcba0b
--- /dev/null
+++ b/config/patterns/nagios
@@ -0,0 +1,124 @@
+##################################################################################
+##################################################################################
+# Chop Nagios log files to smithereens!
+#
+# A set of GROK filters to process logfiles generated by Nagios.
+# While it does not yet cover every possible Nagios log, this set intends to.
+#
+# Some more work needs to be done to cover all External Commands:
+# http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+#
+# If you need some support on these rules please contact:
+# Jelle Smet http://smetj.net
+#
+#################################################################################
+#################################################################################
+
+NAGIOSTIME \[%{NUMBER:nagios_epoch}\]
+
+###############################################
+######## Begin nagios log types
+###############################################
+NAGIOS_TYPE_CURRENT_SERVICE_STATE CURRENT SERVICE STATE
+NAGIOS_TYPE_CURRENT_HOST_STATE CURRENT HOST STATE
+
+NAGIOS_TYPE_SERVICE_NOTIFICATION SERVICE NOTIFICATION
+NAGIOS_TYPE_HOST_NOTIFICATION HOST NOTIFICATION
+
+NAGIOS_TYPE_SERVICE_ALERT SERVICE ALERT
+NAGIOS_TYPE_HOST_ALERT HOST ALERT
+
+NAGIOS_TYPE_SERVICE_FLAPPING_ALERT SERVICE FLAPPING ALERT
+NAGIOS_TYPE_HOST_FLAPPING_ALERT HOST FLAPPING ALERT
+
+NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT SERVICE DOWNTIME ALERT
+NAGIOS_TYPE_HOST_DOWNTIME_ALERT HOST DOWNTIME ALERT
+
+NAGIOS_TYPE_PASSIVE_SERVICE_CHECK PASSIVE SERVICE CHECK
+NAGIOS_TYPE_PASSIVE_HOST_CHECK PASSIVE HOST CHECK
+
+NAGIOS_TYPE_SERVICE_EVENT_HANDLER SERVICE EVENT HANDLER
+NAGIOS_TYPE_HOST_EVENT_HANDLER HOST EVENT HANDLER
+
+NAGIOS_TYPE_EXTERNAL_COMMAND EXTERNAL COMMAND
+NAGIOS_TYPE_TIMEPERIOD_TRANSITION TIMEPERIOD TRANSITION
+###############################################
+######## End nagios log types
+###############################################
+
+###############################################
+######## Begin external check types
+###############################################
+NAGIOS_EC_DISABLE_SVC_CHECK DISABLE_SVC_CHECK
+NAGIOS_EC_ENABLE_SVC_CHECK ENABLE_SVC_CHECK
+NAGIOS_EC_DISABLE_HOST_CHECK DISABLE_HOST_CHECK
+NAGIOS_EC_ENABLE_HOST_CHECK ENABLE_HOST_CHECK
+NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT PROCESS_SERVICE_CHECK_RESULT
+NAGIOS_EC_PROCESS_HOST_CHECK_RESULT PROCESS_HOST_CHECK_RESULT
+NAGIOS_EC_SCHEDULE_SERVICE_DOWNTIME SCHEDULE_SERVICE_DOWNTIME
+NAGIOS_EC_SCHEDULE_HOST_DOWNTIME SCHEDULE_HOST_DOWNTIME
+NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS DISABLE_HOST_SVC_NOTIFICATIONS
+NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS ENABLE_HOST_SVC_NOTIFICATIONS
+NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS DISABLE_HOST_NOTIFICATIONS
+NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS
ENABLE_HOST_NOTIFICATIONS +NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS DISABLE_SVC_NOTIFICATIONS +NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS ENABLE_SVC_NOTIFICATIONS +############################################### +######## End external check types +############################################### +NAGIOS_WARNING Warning:%{SPACE}%{GREEDYDATA:nagios_message} + +NAGIOS_CURRENT_SERVICE_STATE %{NAGIOS_TYPE_CURRENT_SERVICE_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} +NAGIOS_CURRENT_HOST_STATE %{NAGIOS_TYPE_CURRENT_HOST_STATE:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statetype};%{DATA:nagios_statecode};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_NOTIFICATION %{NAGIOS_TYPE_SERVICE_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_NOTIFICATION %{NAGIOS_TYPE_HOST_NOTIFICATION:nagios_type}: %{DATA:nagios_notifyname};%{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_contact};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_ALERT %{NAGIOS_TYPE_SERVICE_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_ALERT %{NAGIOS_TYPE_HOST_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{NUMBER:nagios_attempt};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_FLAPPING_ALERT %{NAGIOS_TYPE_SERVICE_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} +NAGIOS_HOST_FLAPPING_ALERT %{NAGIOS_TYPE_HOST_FLAPPING_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_message} + +NAGIOS_SERVICE_DOWNTIME_ALERT %{NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +NAGIOS_HOST_DOWNTIME_ALERT %{NAGIOS_TYPE_HOST_DOWNTIME_ALERT:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} + +NAGIOS_PASSIVE_SERVICE_CHECK %{NAGIOS_TYPE_PASSIVE_SERVICE_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} +NAGIOS_PASSIVE_HOST_CHECK %{NAGIOS_TYPE_PASSIVE_HOST_CHECK:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_comment} + +NAGIOS_SERVICE_EVENT_HANDLER %{NAGIOS_TYPE_SERVICE_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} +NAGIOS_HOST_EVENT_HANDLER %{NAGIOS_TYPE_HOST_EVENT_HANDLER:nagios_type}: %{DATA:nagios_hostname};%{DATA:nagios_state};%{DATA:nagios_statelevel};%{DATA:nagios_event_handler_name} + +NAGIOS_TIMEPERIOD_TRANSITION %{NAGIOS_TYPE_TIMEPERIOD_TRANSITION:nagios_type}: %{DATA:nagios_service};%{DATA:nagios_unknown1};%{DATA:nagios_unknown2} + +#################### +#### External checks +#################### + +#Disable host & service check +NAGIOS_EC_LINE_DISABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +NAGIOS_EC_LINE_DISABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} + +#Enable 
host & service check +NAGIOS_EC_LINE_ENABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_CHECK:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service} +NAGIOS_EC_LINE_ENABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_CHECK:nagios_command};%{DATA:nagios_hostname} + +#Process host & service check +NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_service};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} +NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_PROCESS_HOST_CHECK_RESULT:nagios_command};%{DATA:nagios_hostname};%{DATA:nagios_state};%{GREEDYDATA:nagios_check_result} + +#Disable host & service notifications +NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + +#Enable host & service notifications +NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS:nagios_command};%{GREEDYDATA:nagios_hostname} +NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS:nagios_command};%{DATA:nagios_hostname};%{GREEDYDATA:nagios_service} + +#Schedule host & service downtime +NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios_type}: %{NAGIOS_EC_SCHEDULE_HOST_DOWNTIME:nagios_command};%{DATA:nagios_hostname};%{NUMBER:nagios_start_time};%{NUMBER:nagios_end_time};%{NUMBER:nagios_fixed};%{NUMBER:nagios_trigger_id};%{NUMBER:nagios_duration};%{DATA:author};%{DATA:comment} + +#End matching line +NAGIOSLOGLINE %{NAGIOSTIME} (?:%{NAGIOS_WARNING}|%{NAGIOS_CURRENT_SERVICE_STATE}|%{NAGIOS_CURRENT_HOST_STATE}|%{NAGIOS_SERVICE_NOTIFICATION}|%{NAGIOS_HOST_NOTIFICATION}|%{NAGIOS_SERVICE_ALERT}|%{NAGIOS_HOST_ALERT}|%{NAGIOS_SERVICE_FLAPPING_ALERT}|%{NAGIOS_HOST_FLAPPING_ALERT}|%{NAGIOS_SERVICE_DOWNTIME_ALERT}|%{NAGIOS_HOST_DOWNTIME_ALERT}|%{NAGIOS_PASSIVE_SERVICE_CHECK}|%{NAGIOS_PASSIVE_HOST_CHECK}|%{NAGIOS_SERVICE_EVENT_HANDLER}|%{NAGIOS_HOST_EVENT_HANDLER}|%{NAGIOS_TIMEPERIOD_TRANSITION}|%{NAGIOS_EC_LINE_DISABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_ENABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_DISABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_ENABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT}|%{NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT}|%{NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME}|%{NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS}) \ No newline at end of file diff --git a/config/patterns/nginx b/config/patterns/nginx new file mode 100644 index 
0000000..92982fc
--- /dev/null
+++ b/config/patterns/nginx
@@ -0,0 +1,19 @@
+NGUSERNAME [a-zA-Z\.\@\-\+_%]+
+NGUSER %{NGUSERNAME}
+
+# '$remote_addr - $remote_user [$time_local] '
+# '"$request" $status $body_bytes_sent '
+# '"$http_referer" "$http_user_agent"';
+
+# 127.0.0.1 - - [28/Jan/2016:14:19:36 +0300] "GET /zero.html HTTP/1.1" 200 398 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"
+
+NOTDQUOTE [^"]*
+DAY2 \d{2}
+
+#NGINXERRTIME %{YEAR:year}/%{MONTHNUM2:month}/%{DAY2:day} %{HOUR:hour}:%{MINUTE:minute}:%{SECOND:second}
+NGINXERRTIME %{YEAR}/%{MONTHNUM2}/%{DAY2} %{HOUR}:%{MINUTE}:%{SECOND}
+
+NGINXACCESS %{IPORHOST:remote_addr} - %{NGUSER:remote_user} \[%{HTTPDATE:time_local}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:body_bytes_sent} "%{NOTDQUOTE:http_referer}" "%{NOTDQUOTE:http_user_agent}"
+
+# YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE
+NGINXERROR %{NGINXERRTIME:time} \[%{LOGLEVEL:loglevel}\] %{NONNEGINT:pid}#%{NONNEGINT:tid}: (\*%{NONNEGINT:cid} )?%{GREEDYDATA:message}
diff --git a/config/patterns/paths b/config/patterns/paths
new file mode 100644
index 0000000..a4f0194
--- /dev/null
+++ b/config/patterns/paths
@@ -0,0 +1,14 @@
+
+#DIR ^.*/
+#FILE [^/].*$
+
+#URI_SPLIT ^%{GREEDYDATA:request}\?%{GREEDYDATA:http_args}$
+#FULLPATH_SPLITTER %{DIR:prefix_directory}%{FILE:file_name}
+
+
+NAXSI_FMT ^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&zone0=%{WORD:zone}
+#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2
+#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2
+#^NAXSI_FMT: ip=%{IPORHOST:src_ip}&server=%{IPORHOST:target_ip}&uri=%{PATH:http_path}&learning=\d&vers=%{DATA:naxsi_version}&total_processed=\d+&total_blocked=\d+&block=\d+(&cscore\d=%{WORD:score_label}&score\d=%{INT:score})+&cscore2
+
+NAXSI_EXLOG ^NAXSI_EXLOG: ip=%{IPORHOST:naxsi_src_ip}&server=%{IPORHOST:naxsi_dst_ip}&uri=%{PATH:http_path}&id=%{INT:naxsi_id}&zone=%{WORD:naxsi_zone}&var_name=%{DATA:naxsi_var_name}&content=
diff --git a/config/patterns/postgresql b/config/patterns/postgresql
new file mode 100644
index 0000000..6d2b984
--- /dev/null
+++ b/config/patterns/postgresql
@@ -0,0 +1,2 @@
+# Default postgresql pg_log format pattern
+POSTGRESQL %{DATESTAMP:timestamp} %{TZ} %{DATA:user_id} %{GREEDYDATA:connection_id} %{POSINT:pid}
\ No newline at end of file
diff --git a/config/patterns/rails b/config/patterns/rails
new file mode 100644
index 0000000..04e4c56
--- /dev/null
+++ b/config/patterns/rails
@@ -0,0 +1,18 @@
+RUUID \s{32}
+# rails controller with action
+RAILS_CONSTROLLER [^#]+
+RAIL_ACTION \w+
+RCONTROLLER %{RAILS_CONSTROLLER:controller}#%{RAIL_ACTION:action}
+
+# this will often be the only line:
+RAILS_TIMESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE}
+RAILS3HEAD (?m)Started %{WORD:verb} "%{URIPATHPARAM:request}" for %{IPORHOST:clientip} at %{RAILS_TIMESTAMP:timestamp}
+# for some strange reason, params are stripped of {} - not sure that's a good idea.
+RPROCESSING \W*Processing by %{RCONTROLLER} as %{NOTSPACE:format}(?:\W*Parameters: {%{DATA:params}}\W*)? +RAILS3PROFILE (?:\(Views: %{NUMBER:viewms}ms \| ActiveRecord: %{NUMBER:activerecordms}ms|\(ActiveRecord: %{NUMBER:activerecordms}ms)? +RAILS3FOOT Completed %{NUMBER:response}%{DATA} in %{NUMBER:totalms}ms %{RAILS3PROFILE}%{GREEDYDATA} + +RAILS_CONTEXT (?:%{DATA}\n)* + +# putting it all together +RAILS3 %{RAILS3HEAD}(?:%{RPROCESSING})?%{RAILS_CONTEXT:context}(?:%{RAILS3FOOT})? \ No newline at end of file diff --git a/config/patterns/redis b/config/patterns/redis new file mode 100644 index 0000000..6a005a8 --- /dev/null +++ b/config/patterns/redis @@ -0,0 +1,21 @@ + +# +# Format 1: +# +# [43569] 27 Aug 12:38:58.471 * RDB: 12 MB of memory used by copy-on-write +# + +# +# Format 2: +# +# 31493:M 17 Sep 09:02:54.807 # Server started, Redis version 3.0.2 +# 31493:M 17 Sep 09:02:54.807 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm$ +# 31493:M 17 Sep 09:02:54.807 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. +# 31493:M 17 Sep 09:02:54.807 * DB loaded from disk: 0.000 seconds +# 31493:M 17 Sep 09:02:54.807 * The server is now ready to accept connections on port 6379 +# + +REDISTIMESTAMP %{MONTHDAY} %{MONTH} %{TIME} +REDISLOG \[%{POSINT:pid}\] %{REDISTIMESTAMP:time} \*\s +REDISLOG1 %{REDISLOG} +REDISLOG2 %{POSINT:pid}:M %{REDISTIMESTAMP:time} [*#] %{GREEDYDATA:message} \ No newline at end of file diff --git a/config/patterns/ruby b/config/patterns/ruby new file mode 100644 index 0000000..845ba0d --- /dev/null +++ b/config/patterns/ruby @@ -0,0 +1,2 @@ +RUBY_LOGLEVEL DEBUG|FATAL|ERROR|WARN|INFO +RUBY_LOGGER [DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:pid}\] *%{RUBY_LOGLEVEL:loglevel} -- +%{DATA:progname}: %{GREEDYDATA:message} \ No newline at end of file diff --git a/config/patterns/smb b/config/patterns/smb new file mode 100644 index 0000000..38b1f4d --- /dev/null +++ b/config/patterns/smb @@ -0,0 +1 @@ +SMB_AUTH_FAIL Auth:%{GREEDYDATA} user \[%{DATA:smb_domain}\]\\\[%{DATA:user}\]%{GREEDYDATA} status \[NT_STATUS_NO_SUCH_USER\]%{GREEDYDATA} remote host \[ipv4:%{IP:ip_source} \ No newline at end of file diff --git a/config/patterns/ssh b/config/patterns/ssh new file mode 100644 index 0000000..bf9fd1e --- /dev/null +++ b/config/patterns/ssh @@ -0,0 +1,61 @@ +# sshd grok pattern + +# Start/Stop +SSHD_LISTEN Server listening on %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}. +SSHD_TERMINATE Received signal %{NUMBER:sshd_signal}; terminating. + +# SSH Tunnel +SSHD_TUNN_ERR1 error: connect_to %{IP:sshd_listen_ip} port %{NUMBER:sshd_listen_port}: failed. +SSHD_TUNN_ERR2 error: channel_setup_fwd_listener: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_ERR3 error: bind: Address already in use +SSHD_TUNN_ERR4 error: channel_setup_fwd_listener_tcpip: cannot listen to port: %{NUMBER:sshd_listen_port} +SSHD_TUNN_TIMEOUT Timeout, client not responding. 
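+# A synthetic message the SSHD_SUCCESS pattern below is meant to match; note
+# that these patterns expect only the message part, after the syslog prefix:
+# Accepted publickey for alice from 192.0.2.10 port 51234 ssh2: ED25519 SHA256:aBcDeF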
+ +# Normal +SSHD_SUCCESS Accepted %{WORD:sshd_auth_type} for %{USERNAME:sshd_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}: %{GREEDYDATA:sshd_cipher} +SSHD_DISCONNECT Received disconnect from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:%{NUMBER:sshd_disconnect_code}: %{GREEDYDATA:sshd_disconnect_status} +SSHD_CONN_CLOSE Connection closed by %{IP:sshd_client_ip}$ +SSHD_SESSION_OPEN pam_unix\(sshd:session\): session opened for user %{USERNAME:sshd_user} by \(uid=\d+\) +SSHD_SESSION_CLOSE pam_unix\(sshd:session\): session closed for user %{USERNAME:sshd_user} +SSHD_SESSION_FAIL pam_systemd\(sshd:session\): Failed to release session: %{GREEDYDATA:sshd_disconnect_status} +SSHD_LOGOUT_ERR syslogin_perform_logout: logout\(\) returned an error + +# Probe +SSHD_REFUSE_CONN refused connect from %{DATA:sshd_client_hostname} \(%{IPORHOST:sshd_client_ip}\) +SSHD_TCPWRAP_FAIL1 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: can't verify hostname: getaddrinfo\(%{DATA:sshd_paranoid_hostname}, %{DATA:sshd_sa_family}\) failed +SSHD_TCPWRAP_FAIL2 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/address mismatch: %{IPORHOST:sshd_client_ip} != %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL3 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: %{HOSTNAME:sshd_paranoid_hostname_1} != %{HOSTNAME:sshd_paranoid_hostname_2} +SSHD_TCPWRAP_FAIL4 warning: %{DATA:sshd_tcpd_file}, line %{NUMBER}: host name/name mismatch: reverse lookup results in non-FQDN %{HOSTNAME:sshd_paranoid_hostname} +SSHD_TCPWRAP_FAIL5 warning: can't get client address: Connection reset by peer +SSHD_FAIL Failed %{WORD:sshd_auth_type} for %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_USER_FAIL Failed password for invalid user %{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol} +SSHD_INVAL_USER Invalid user\s*%{USERNAME:sshd_invalid_user}? from %{IP:sshd_client_ip} + +# preauth +SSHD_DISC_PREAUTH Disconnected from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_MAXE_PREAUTH error: maximum authentication attempts exceeded for (?:invalid user |)%{USERNAME:sshd_invalid_user} from %{IP:sshd_client_ip} port %{NUMBER:sshd_port} %{WORD:sshd_protocol}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_DISR_PREAUTH Disconnecting: %{GREEDYDATA:sshd_disconnect_status} \[%{GREEDYDATA:sshd_privsep}\] +SSHD_INVA_PREAUTH input_userauth_request: invalid user %{USERNAME:sshd_invalid_user}?\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_REST_PREAUTH Connection reset by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_CLOS_PREAUTH Connection closed by %{IP:sshd_client_ip} port %{NUMBER:sshd_port}\s*(?:\[%{GREEDYDATA:sshd_privsep}\]|) +SSHD_FAIL_PREAUTH fatal: Unable to negotiate with %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_FAI2_PREAUTH fatal: %{GREEDYDATA:sshd_fatal_status}: Connection from %{IP:sshd_client_ip} port %{NUMBER:sshd_port}:\s*%{GREEDYDATA:sshd_disconnect_status}? \[%{GREEDYDATA:sshd_privsep}\] +SSHD_BADL_PREAUTH Bad packet length %{NUMBER:sshd_packet_length}. 
\[%{GREEDYDATA:sshd_privsep}\] + +# Corrupted +SSHD_IDENT_FAIL Did not receive identification string from %{IP:sshd_client_ip} +SSHD_MAPB_FAIL Address %{IP:sshd_client_ip} maps to %{HOSTNAME:sshd_client_hostname}, but this does not map back to the address - POSSIBLE BREAK-IN ATTEMPT! +SSHD_RMAP_FAIL reverse mapping checking getaddrinfo for %{HOSTNAME:sshd_client_hostname} \[%{IP:sshd_client_ip}\] failed - POSSIBLE BREAK-IN ATTEMPT! +SSHD_TOOMANY_AUTH Disconnecting: Too many authentication failures for %{USERNAME:sshd_invalid_user} +SSHD_CORRUPT_MAC Corrupted MAC on input +SSHD_PACKET_CORRUPT Disconnecting: Packet corrupt +SSHD_BAD_VERSION Bad protocol version identification '%{GREEDYDATA}' from %{IP:sshd_client_ip} + +#### +SSHD_INIT %{SSHD_LISTEN}|%{SSHD_TERMINATE} +SSHD_TUNN %{SSHD_TUNN_ERR1}|%{SSHD_TUNN_ERR2}|%{SSHD_TUNN_ERR3}|%{SSHD_TUNN_ERR4}|%{SSHD_TUNN_TIMEOUT} +SSHD_NORMAL_LOG %{SSHD_SUCCESS}|%{SSHD_DISCONNECT}|%{SSHD_CONN_CLOSE}|%{SSHD_SESSION_OPEN}|%{SSHD_SESSION_CLOSE}|%{SSHD_SESSION_FAIL}|%{SSHD_LOGOUT_ERR} +SSHD_PROBE_LOG %{SSHD_REFUSE_CONN}|%{SSHD_TCPWRAP_FAIL1}|%{SSHD_TCPWRAP_FAIL2}|%{SSHD_TCPWRAP_FAIL3}|%{SSHD_TCPWRAP_FAIL4}|%{SSHD_TCPWRAP_FAIL5}|%{SSHD_FAIL}|%{SSHD_USER_FAIL}|%{SSHD_INVAL_USER} +SSHD_PREAUTH %{SSHD_DISC_PREAUTH}|%{SSHD_MAXE_PREAUTH}|%{SSHD_DISR_PREAUTH}|%{SSHD_INVA_PREAUTH}|%{SSHD_REST_PREAUTH}|%{SSHD_FAIL_PREAUTH}|%{SSHD_CLOS_PREAUTH}|%{SSHD_FAI2_PREAUTH}|%{SSHD_BADL_PREAUTH} +SSHD_CORRUPTED %{SSHD_IDENT_FAIL}|%{SSHD_MAPB_FAIL}|%{SSHD_RMAP_FAIL}|%{SSHD_TOOMANY_AUTH}|%{SSHD_CORRUPT_MAC}|%{SSHD_PACKET_CORRUPT}|%{SSHD_BAD_VERSION} +SSHD_LOG %{SSHD_INIT}|%{SSHD_NORMAL_LOG}|%{SSHD_PROBE_LOG}|%{SSHD_CORRUPTED}|%{SSHD_TUNN}|%{SSHD_PREAUTH} diff --git a/config/patterns/tcpdump b/config/patterns/tcpdump new file mode 100644 index 0000000..8c76105 --- /dev/null +++ b/config/patterns/tcpdump @@ -0,0 +1 @@ +TCPDUMP_OUTPUT %{GREEDYDATA:timestamp} IP %{IPORHOST:source_ip}\.%{INT:source_port} > %{IPORHOST:dest_ip}\.%{INT:dest_port}: Flags \[%{GREEDYDATA:tcpflags}\], seq diff --git a/config/profiles.yaml b/config/profiles.yaml new file mode 100644 index 0000000..9d81c92 --- /dev/null +++ b/config/profiles.yaml @@ -0,0 +1,14 @@ +name: default_ip_remediation +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 4h +#duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) +# notifications: +# - slack_default # Set the webhook in /etc/crowdsec/notifications/slack.yaml before enabling this. +# - splunk_default # Set the splunk url and token in /etc/crowdsec/notifications/splunk.yaml before enabling this. +# - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. +# - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. +on_success: break diff --git a/config/simulation.yaml b/config/simulation.yaml new file mode 100644 index 0000000..e9c6899 --- /dev/null +++ b/config/simulation.yaml @@ -0,0 +1,4 @@ +simulation: off +# exclusions: +# - crowdsecurity/ssh-bf + \ No newline at end of file diff --git a/config/user.yaml b/config/user.yaml new file mode 100644 index 0000000..f3ab214 --- /dev/null +++ b/config/user.yaml @@ -0,0 +1,40 @@ +common: + daemonize: false + pid_dir: /var/run/ + log_media: stdout + log_level: info + log_dir: /var/log/ + working_dir: . 
+config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data + #simulation_path: /etc/crowdsec/config/simulation.yaml + #hub_dir: /etc/crowdsec/hub/ + #index_path: ./config/hub/.index.json +crowdsec_service: + #acquisition_path: ./config/acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + user: crowdsec + #log_level: info + password: crowdsec + db_name: crowdsec + host: "127.0.0.1" + port: 3306 +api: + client: + insecure_skip_verify: false # default true + credentials_path: /etc/crowdsec/local_api_credentials.yaml + server: + #log_level: info + listen_uri: 127.0.0.1:8080 + profiles_path: /etc/crowdsec/profiles.yaml + online_client: # Central API + credentials_path: /etc/crowdsec/online_api_credentials.yaml +prometheus: + enabled: true + level: full diff --git a/debian/.gitignore b/debian/.gitignore new file mode 100644 index 0000000..325e494 --- /dev/null +++ b/debian/.gitignore @@ -0,0 +1,7 @@ +# Generated during the build +/crowdsec +/files +/*.substvars +/*.log +/*.debhelper +/*-stamp diff --git a/debian/README.md b/debian/README.md new file mode 100644 index 0000000..82e43e9 --- /dev/null +++ b/debian/README.md @@ -0,0 +1,14 @@ + +# Building Debian/Ubuntu packages + +It is not recommended to build your own packages for production environments. + +However, if you want to experiment and contribute: + +* Update the changelog (at least give it a correct version number) +* Run "QUILT_PATCHES=debian/patches quilt push -a && quilt refresh" + +We do the above in the build pipeline, so you'll have to do it manually before running: + +* dpkg-buildpackage -uc -us -b + diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..ee87a4b --- /dev/null +++ b/debian/changelog @@ -0,0 +1,87 @@ +crowdsec (1.0.13-3) UNRELEASED; urgency=medium + + * Fix small stuff + + -- Manuel Sabban Wed, 26 Apr 2021 09:30:14 +0100 + +crowdsec (1.0.13-2) UNRELEASED; urgency=medium + + * Fix version advertisement + + -- Manuel Sabban Wed, 26 Apr 2021 09:30:14 +0100 + +crowdsec (1.0.13-1) UNRELEASED; urgency=medium + + * Bump to 1.0.13 + + -- Manuel Sabban Wed, 26 Apr 2021 09:30:14 +0100 + +crowdsec (1.0.12-1) UNRELEASED; urgency=medium + + * Bump to 1.0.12 + + -- Manuel Sabban Wed, 07 Apr 2021 15:10:11 +0100 + +crowdsec (1.0.9-5) UNRELEASED; urgency=medium + + * Fix cleanup on remove --purge + + -- Manuel Sabban Wed, 23 Mar 2021 15:17:09 +0100 + +crowdsec (1.0.9-4) UNRELEASED; urgency=medium + + * Fix a bunch a bugs in package management + + -- Manuel Sabban Wed, 23 Mar 2021 15:17:09 +0100 + +crowdsec (1.0.9-3) UNRELEASED; urgency=medium + + * truly fix error generating acquis.yaml + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.9-2) UNRELEASED; urgency=medium + + * fix error generating acquis.yaml + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.9-1) UNRELEASED; urgency=medium + + * bump to 1.0.9 + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.7-4) UNRELEASED; urgency=medium + + * fix crowdsec -version output + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.7-3) UNRELEASED; urgency=medium + + * fix error when docker metabase doesn't exist + * fix crowdsec not stopped when uninstalling + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.7-2) UNRELEASED; urgency=medium + + * remove stop metabase docker container + * purge removes the metabase container once and for all + + -- Manuel Sabban Wed, 10 
Feb 2021 10:41:06 +0100 + +crowdsec (1.0.7-1) UNRELEASED; urgency=medium + + * debian package + * bump to 1.0.7 + + -- Manuel Sabban Wed, 10 Feb 2021 10:41:06 +0100 + +crowdsec (1.0.4-1) UNRELEASED; urgency=medium + + * debian package + * bump to 1.0.4 + + -- Manuel Sabban Mon, 08 Feb 2021 09:38:06 +0100 diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..b4de394 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +11 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..b2d2bac --- /dev/null +++ b/debian/control @@ -0,0 +1,8 @@ +Source: crowdsec +Maintainer: Crowdsec Team +Build-Depends: debhelper, jq, bash, git + +Package: crowdsec +Architecture: any +Description: Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviours. It also automatically benefits from our global community-wide IP reputation database +Depends: coreutils \ No newline at end of file diff --git a/debian/crowdsec.cron.daily b/debian/crowdsec.cron.daily new file mode 120000 index 0000000..3fb31fe --- /dev/null +++ b/debian/crowdsec.cron.daily @@ -0,0 +1 @@ +../config/crowdsec.cron.daily \ No newline at end of file diff --git a/debian/crowdsec.service b/debian/crowdsec.service new file mode 100644 index 0000000..8743a03 --- /dev/null +++ b/debian/crowdsec.service @@ -0,0 +1,16 @@ +[Unit] +Description=Crowdsec agent +After=syslog.target network.target remote-fs.target nss-lookup.target + +[Service] +Type=notify +Environment=LC_ALL=C LANG=C +ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t +ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml +#ExecStartPost=/bin/sleep 0.1 +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=60 + +[Install] +WantedBy=multi-user.target diff --git a/debian/install b/debian/install new file mode 100644 index 0000000..11c82d0 --- /dev/null +++ b/debian/install @@ -0,0 +1,12 @@ +config/config.yaml etc/crowdsec/ +config/profiles.yaml etc/crowdsec/ +config/simulation.yaml etc/crowdsec/ + +config/patterns/* etc/crowdsec/patterns +config/crowdsec.service lib/systemd/system + +# Referenced configs: +plugins/notifications/slack/slack.yaml etc/crowdsec/notifications/ +plugins/notifications/http/http.yaml etc/crowdsec/notifications/ +plugins/notifications/splunk/splunk.yaml etc/crowdsec/notifications/ +plugins/notifications/email/email.yaml etc/crowdsec/notifications/ diff --git a/debian/patches/config_plugins b/debian/patches/config_plugins new file mode 100644 index 0000000..5773edd --- /dev/null +++ b/debian/patches/config_plugins @@ -0,0 +1,13 @@ +Index: crowdsec/config/config.yaml +=================================================================== +--- crowdsec.orig/config/config.yaml ++++ crowdsec/config/config.yaml +@@ -12,7 +12,7 @@ config_paths: + hub_dir: /etc/crowdsec/hub/ + index_path: /etc/crowdsec/hub/.index.json + notification_dir: /etc/crowdsec/notifications/ +- plugin_dir: /usr/local/lib/crowdsec/plugins/ ++ plugin_dir: /usr/lib/crowdsec/plugins/ + crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + parser_routines: 1 diff --git a/debian/patches/series b/debian/patches/series new file mode 100644 index 0000000..6af0d37 --- /dev/null +++ b/debian/patches/series @@ -0,0 +1 @@ +config_plugins diff --git a/debian/postinst b/debian/postinst new file mode 100644 index 0000000..a862c88 --- /dev/null +++ b/debian/postinst @@ -0,0 +1,107 @@ +#!/bin/bash + +COLLECTIONS=false +set -e + +# Source debconf library. +. 
/usr/share/debconf/confmodule
+
+if [ "$1" = configure ]; then
+    if [[ ! -d /var/lib/crowdsec/data ]]; then
+        mkdir -p /var/lib/crowdsec/data
+    fi
+
+    if [[ -d /var/lib/crowdsec/backup ]]; then
+        cscli config restore /var/lib/crowdsec/backup/backup.config
+        rm -rf /var/lib/crowdsec/backup
+        /usr/bin/cscli hub update
+        /usr/bin/cscli hub upgrade
+        systemctl start crowdsec
+    fi
+
+    . /usr/share/crowdsec/wizard.sh -n
+    if ! [[ -f /etc/crowdsec/acquis.yaml ]]; then
+        echo Creating /etc/crowdsec/acquis.yaml
+        set +e
+        SILENT=true detect_services
+        SILENT=true TMP_ACQUIS_FILE_SKIP=skip genacquisition
+        set -e
+        COLLECTIONS=true
+    fi
+
+    if [[ -f /etc/crowdsec/local_api_credentials.yaml ]] ; then
+        chmod 600 /etc/crowdsec/local_api_credentials.yaml
+    fi
+
+    if [[ -f /etc/crowdsec/online_api_credentials.yaml ]]; then
+        chmod 600 /etc/crowdsec/online_api_credentials.yaml
+    fi
+
+    if [[ ! -f /etc/crowdsec/local_api_credentials.yaml ]] || [[ ! -f /etc/crowdsec/online_api_credentials.yaml ]]; then
+        if [[ ! -f /etc/crowdsec/local_api_credentials.yaml ]] ; then
+            install -m 600 /dev/null /etc/crowdsec/local_api_credentials.yaml
+        fi
+        if [[ ! -f /etc/crowdsec/online_api_credentials.yaml ]] ; then
+            install -m 600 /dev/null /etc/crowdsec/online_api_credentials.yaml
+        fi
+
+        db_input medium crowdsec/lapi || true
+        db_go || true
+
+        db_get crowdsec/lapi
+        LAPI=$RET
+
+        if [ "$LAPI" = true ]; then
+            db_input medium crowdsec/capi || true
+            db_go || true
+
+            db_get crowdsec/capi
+            CAPI=$RET
+
+            cscli machines add -a
+
+            if [ "$CAPI" = true ]; then
+                cscli capi register
+            fi
+
+        else
+            db_input medium crowdsec/lapi_host || true
+            db_go || true
+
+            db_get crowdsec/lapi_host
+            LAPI_HOST=$RET
+            sed -i "s/127.0.0.1:8080/$LAPI_HOST/g" /etc/crowdsec/config.yaml
+        fi
+    fi
+
+    echo Updating hub
+    /usr/bin/cscli hub update
+    if [ "$COLLECTIONS" = true ]; then
+        set +e
+        CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection
+        set -e
+    fi
+
+    if [[ -f /var/lib/crowdsec/data/crowdsec.db.backup ]]; then
+        cp /var/lib/crowdsec/data/crowdsec.db.backup /var/lib/crowdsec/data/crowdsec.db
+        rm -f /var/lib/crowdsec/data/crowdsec.db.backup
+    fi
+
+    systemctl --quiet is-enabled crowdsec || systemctl unmask crowdsec && systemctl enable crowdsec
+
+    API=$(cscli config show --key "Config.API.Server")
+    if [ "$API" = "" ] ; then
+        LAPI=false
+    else
+        PORT=$(cscli config show --key "Config.API.Server.ListenURI"|cut -d ":" -f2)
+    fi
+    if [ "$LAPI" = false ] || [ -z "$(ss -nlt "sport = ${PORT}" | grep -v ^State)" ] ; then
+        systemctl start crowdsec
+    else
+        echo "Not attempting to start crowdsec, port ${PORT} is already used or lapi was disabled"
+        echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml"
+    fi
+fi
+
+echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'"
diff --git a/debian/postrm b/debian/postrm
new file mode 100644
index 0000000..61a95ee
--- /dev/null
+++ b/debian/postrm
@@ -0,0 +1,4 @@
+if [ "$1" = "purge" ]; then
+    find /etc/crowdsec -maxdepth 1 -mindepth 1 | grep -v "bouncer" | xargs rm -rf || echo ""
+    rm -rf /var/lib/crowdsec
+fi
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..e2485ce
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -e
+
+# Source debconf library.
+. /usr/share/debconf/confmodule
+
+OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1)
+OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2)
+OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1)
+
+NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1)
+NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2)
+NEW_PATCH_VERSION=$(echo $3 | cut -d'.' -f3|cut -d'-' -f1)
+
+if [ "$1" = upgrade ]; then
+
+    OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1)
+    OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2)
+    OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1)
+
+    NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1)
+    NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2)
+    NEW_PATCH_VERSION=$(echo $3 | cut -d'.' -f3|cut -d'-' -f1)
+
+    if [[ $OLD_MAJOR_VERSION -eq "1" ]] && [[ $OLD_MINOR_VERSION -eq "0" ]] && [[ $OLD_PATCH_VERSION -lt "9" ]]; then
+        if [[ -f /var/lib/crowdsec/data/crowdsec.db ]]; then
+            cp /var/lib/crowdsec/data/crowdsec.db /var/lib/crowdsec/data/crowdsec.db.backup
+        fi
+    fi
+
+    if [[ $NEW_MAJOR_VERSION -gt $OLD_MAJOR_VERSION ]]; then
+        echo "Stopping crowdsec"
+        systemctl stop crowdsec || true
+        cscli config backup /var/lib/crowdsec/backup
+    fi
+fi
+
+echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'"
diff --git a/debian/prerm b/debian/prerm
new file mode 100644
index 0000000..eb4eb4e
--- /dev/null
+++ b/debian/prerm
@@ -0,0 +1,9 @@
+if [ "$1" = "remove" ]; then
+    cscli dashboard remove -f -y || true
+    systemctl stop crowdsec
+    systemctl disable crowdsec
+fi
+
+if [ "$1" = "upgrade" ]; then
+    systemctl stop crowdsec
+fi
\ No newline at end of file
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..f496a9e
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,53 @@
+#!/usr/bin/make -f
+
+export DEB_VERSION=$(shell dpkg-parsechangelog | egrep '^Version:' | cut -f 2 -d ' ')
+export BUILD_VERSION=v${DEB_VERSION}-debian-pragmatic
+export GO111MODULE=on
+
+# LD_OPTS=-ldflags "-s -w -X github.com/crowdsecurity/crowdsec/pkg/cwversion.Version=$(BUILD_VERSION) \
+# -X github.com/crowdsecurity/crowdsec/pkg/cwversion.BuildDate=$(BUILD_TIMESTAMP) \
+# -X github.com/crowdsecurity/crowdsec/pkg/cwversion.Codename=$(BUILD_CODENAME) \
+# -X github.com/crowdsecurity/crowdsec/pkg/cwversion.Tag=$(BUILD_TAG) \
+# -X github.com/crowdsecurity/crowdsec/pkg/cwversion.GoVersion=$(BUILD_GOVERSION)"
+
+%:
+	dh $@
+
+override_dh_auto_clean:
+override_dh_auto_test:
+override_dh_auto_build:
+override_dh_auto_install:
+#	mkdir /tmp/go
+#	echo $(go version)
+#	echo $($GOCMD version)
+#	cd cmd/crowdsec && GOROOT=/tmp/go GO111MODULE=on $(GOBUILD) $(LD_OPTS) -o $(CROWDSEC_BIN) -v && cd ..
+#	cd cmd/crowdsec-cli && GOROOT=/tmp/go GO111MODULE=on $(GOBUILD) $(LD_OPTS) -o cscli -v && cd ..
+	make build
+	mkdir -p debian/crowdsec/usr/bin
+	mkdir -p debian/crowdsec/etc/crowdsec
+	mkdir -p debian/crowdsec/usr/share/crowdsec
+	mkdir -p debian/crowdsec/etc/crowdsec/hub/
+	mkdir -p debian/crowdsec/usr/share/crowdsec/config
+
+	mkdir -p debian/crowdsec/usr/lib/crowdsec/plugins/
+	mkdir -p debian/crowdsec/etc/crowdsec/notifications/
+
+	install -m 551 plugins/notifications/slack/notification-slack debian/crowdsec/usr/lib/crowdsec/plugins/
+	install -m 551 plugins/notifications/http/notification-http debian/crowdsec/usr/lib/crowdsec/plugins/
+	install -m 551 plugins/notifications/splunk/notification-splunk debian/crowdsec/usr/lib/crowdsec/plugins/
+	install -m 551 plugins/notifications/email/notification-email debian/crowdsec/usr/lib/crowdsec/plugins/
+
+	cp cmd/crowdsec/crowdsec debian/crowdsec/usr/bin
+	cp cmd/crowdsec-cli/cscli debian/crowdsec/usr/bin
+	cp wizard.sh debian/crowdsec/usr/share/crowdsec
+	install -m 600 config/config.yaml debian/crowdsec/etc/crowdsec/config.yaml
+	cp config/simulation.yaml debian/crowdsec/etc/crowdsec/simulation.yaml
+	cp config/profiles.yaml debian/crowdsec/etc/crowdsec/profiles.yaml
+	cp config/console.yaml debian/crowdsec/etc/crowdsec/console.yaml
+	cp -a config/patterns debian/crowdsec/etc/crowdsec
+
+override_dh_fixperms:
+	dh_fixperms
+	chmod 600 debian/crowdsec/etc/crowdsec/notifications/*
+	chmod 600 debian/crowdsec/etc/crowdsec/config.yaml
diff --git a/debian/templates b/debian/templates
new file mode 100644
index 0000000..c07ef84
--- /dev/null
+++ b/debian/templates
@@ -0,0 +1,23 @@
+Template: crowdsec/lapi
+Type: boolean
+Default: true
+Description: Do you want to run the local API server?
+ A local API is required to run crowdsec, but another installation can be used.
+ .
+ If you don't know what to do, answer yes.
+
+Template: crowdsec/lapi_host
+Type: string
+Default: 127.0.0.1:8080
+Description: Address of the local API server
+ A local API is required to run crowdsec, but another installation can be used.
+ .
+ Please add the address of the local API server.
+
+Template: crowdsec/capi
+Type: boolean
+Default: true
+Description: Do you want to register to the centralized remote API server?
+ To share information with other crowdsec installations, you can register to the centralized remote API server.
+ .
+ If you don't know what to do, answer yes.
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..b1f4b12
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,222 @@
+# Quick reference
+
+* Documentation and help: https://docs.crowdsec.net/
+* Crowdsec concepts: https://docs.crowdsec.net/docs/concepts
+* Where to file issues: https://github.com/crowdsecurity/crowdsec
+
+# What is Crowdsec
+
+Crowdsec - an open-source, lightweight agent to detect and respond to bad behaviours. It also automatically benefits from our global community-wide IP reputation database.
+
+# How to use this image
+
+## Docker image versions
+
+All the following versions are available on Docker Hub for 386, amd64, arm/v6, arm/v7, arm64.
+
+### Alpine
+
+ - crowdsecurity/crowdsec:{version}
+
+Recommended for production usage. Also available on GitHub (ghcr.io).
+
+ - crowdsecurity/crowdsec:latest
+
+For development and testing.
+
+Since v1.4.2:
+
+ - crowdsecurity/crowdsec:slim
+
+Reduced in size by 60%; it does not include the notifier plugins nor the GeoIP database.
+If you need GeoIP data on decisions, running `cscli hub upgrade` inside the
+container downloads the GeoIP database at runtime.
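+
+For example, with a running container named `crowdsec` (a name assumed here,
+matching the run example later in this document), the database can be fetched
+at runtime with:
+
+```console
+$ docker exec -t crowdsec cscli hub upgrade
+```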
+
+### Debian (since v1.3.3)
+
+ - crowdsecurity/crowdsec:{version}-debian
+ - crowdsecurity/crowdsec:latest-debian
+
+The Debian version includes support for systemd and journalctl.
+
+### Custom
+
+You can build your own images with Dockerfile and Dockerfile.debian.
+
+For example, if you want a Debian version without plugin notifiers:
+
+```console
+$ docker build -f Dockerfile.debian --build-arg=BUILD_ENV=slim .
+```
+
+Supported values for `BUILD_ENV` are: `full`, `with-geoip`, `with-plugins`, `slim`.
+
+## Required configuration
+
+### Journalctl (only for the Debian image)
+To use journalctl as a log stream, for example through the `DSN` environment variable, you must mount the host journal into the container.
+This can be done by adding the following volume mount to your docker command:
+
+```
+-v /var/log/journal:/run/log/journal
+```
+
+### Logs ingestion and processing
+Collections are a good place to start: https://docs.crowdsec.net/docs/collections/intro
+
+Find collections|scenarios|parsers|postoverflows in the hub: https://hub.crowdsec.net
+
+* Specify the collections|scenarios|parsers|postoverflows to install via the environment variables (by default [`crowdsecurity/linux`](https://hub.crowdsec.net/author/crowdsecurity/collections/linux) is installed)
+* Mount volumes to expose the log files that crowdsec should ingest
+
+### Acquisition
+
+`/etc/crowdsec/acquis.yaml` maps logs to the provided parsers. Find out more here: https://docs.crowdsec.net/docs/concepts/#acquisition
+
+acquis.yaml example:
+```yaml
+filenames:
+ - /logs/auth.log
+ - /logs/syslog
+labels:
+  type: syslog
+---
+filename: /logs/apache2/*.log
+labels:
+  type: apache2
+```
+
+`labels.type`: use `syslog` if the logs come from syslog; otherwise, check the collection's documentation for the relevant type.
+
+## Recommended configuration
+### Volumes
+
+We strongly suggest mounting **named volumes** for the Crowdsec configuration and database, so that credentials and decisions survive container destruction and recreation, version updates, etc.
+* Credentials and configuration: `/etc/crowdsec`
+* Database when using the default SQLite: `/var/lib/crowdsec/data`
+
+## Start a Crowdsec instance
+
+```shell
+docker run -d \
+    -v local_path_to_crowdsec_config/acquis.yaml:/etc/crowdsec/acquis.yaml \
+    -v crowdsec_config:/etc/crowdsec \
+    -v crowdsec_data:/var/lib/crowdsec/data \
+    -v /var/log/auth.log:/logs/auth.log:ro \
+    -v /var/log/syslog.log:/logs/syslog.log:ro \
+    -v /var/log/apache:/logs/apache:ro \
+    -e COLLECTIONS="crowdsecurity/apache2 crowdsecurity/sshd" \
+    -p 8080:8080 -p 6060:6060 \
+    --name crowdsec crowdsecurity/crowdsec
+```
+
+## ... or docker-compose
+
+Check this full stack example using docker-compose: https://github.com/crowdsecurity/example-docker-compose
+
+# How to extend this image
+## Full configuration
+The container is built with a specific docker [configuration](https://github.com/crowdsecurity/crowdsec/blob/master/docker/config.yaml). If you need to change it, bind `/etc/crowdsec/config.yaml` to your local configuration file.
+## Notifications
+If you wish to use the [notification system](https://docs.crowdsec.net/docs/notification_plugins/intro), you will need to mount at least a custom `profiles.yaml` and a notification configuration to `/etc/crowdsec/notifications`.
+
+# Deployment use cases
+Crowdsec is composed of an `agent` that parses logs and creates `alerts`, which the `local API` (`LAPI`) turns into decisions. Both can run in the same process, but in complex configurations it also makes sense to run them in separate containers: agents sit on the same machines as the protected components, while a LAPI gathers all signals from the agents and communicates with the `central API`.
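+
+As a minimal sketch of this split (the container names, the shared network and
+the credentials are assumptions; the environment variables are documented in
+the Reference section below):
+
+```shell
+# Shared network so the agent can reach the LAPI container by name
+docker network create crowdsec_net
+
+# LAPI-only container: serves the local API, parses no logs
+docker run -d --network crowdsec_net --name crowdsec_lapi \
+    -e DISABLE_AGENT=true \
+    -e AGENT_USERNAME="agent_user_name" \
+    -e AGENT_PASSWORD="agent_password" \
+    -v crowdsec_config:/etc/crowdsec \
+    -v crowdsec_data:/var/lib/crowdsec/data \
+    crowdsecurity/crowdsec
+
+# Agent-only container: parses logs and pushes alerts to the LAPI
+docker run -d --network crowdsec_net --name crowdsec_agent \
+    -e DISABLE_LOCAL_API=true \
+    -e AGENT_USERNAME="agent_user_name" \
+    -e AGENT_PASSWORD="agent_password" \
+    -e LOCAL_API_URL="http://crowdsec_lapi:8080" \
+    -v /var/log/auth.log:/logs/auth.log:ro \
+    crowdsecurity/crowdsec
+```
+
+Setting `AGENT_USERNAME`/`AGENT_PASSWORD` on the LAPI side lets the startup script pre-register the agent; the manual registration flow is shown in the next section.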
+
+## Register a new agent with LAPI
+```shell
+docker exec -it crowdsec_lapi_container_name cscli machines add agent_user_name --password agent_password
+```
+
+## Run an agent connected to LAPI
+Add the following environment variables to your docker run command:
+* `DISABLE_LOCAL_API=true`
+* `AGENT_USERNAME="agent_user_name"` - the agent_user_name previously registered with the LAPI
+* `AGENT_PASSWORD="agent_password"` - the agent_password previously registered with the LAPI
+* `LOCAL_API_URL="http://LAPI_host:LAPI_port"`
+
+# Next steps
+## Bouncers
+Crowdsec is the detection component; remediation is implemented using `bouncers`. Each bouncer protects a specific component. Find out more:
+
+https://hub.crowdsec.net/browse/#bouncers
+
+https://docs.crowdsec.net/docs/user_guides/bouncers_configuration/
+
+### Automatic Bouncer Registration
+
+You can automatically register bouncers with the crowdsec container on startup, using environment variables or Docker secrets. You cannot use this process to update an existing bouncer without first deleting it.
+
+To use environment variables, they should be in the format `BOUNCER_KEY_<name>=<key>`, e.g. `BOUNCER_KEY_nginx=mysecretkey12345`.
+
+To use Docker secrets, the secret should be named `bouncer_key_<name>` with a content of `<key>`, e.g. `bouncer_key_nginx` with content `mysecretkey12345`.
+
+A bouncer key can be any string, but we recommend an alphanumeric value to stay consistent with the crowdsec-generated keys and to avoid problems with escaping special characters.
+
+## Console
+We provide a web-based interface to get more from Crowdsec: https://docs.crowdsec.net/docs/console
+
+Subscribe here: https://app.crowdsec.net
+
+# Caveats
+Using binds rather than named volumes ([more explanation here](https://docs.docker.com/storage/volumes/)) results in more complexity, as you'll have to bind the relevant files one by one, whereas with named volumes you can mount the full configuration and data folders. On the other hand, named volumes are less straightforward to navigate.
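+
+Named volumes can still be examined with standard Docker tooling when needed;
+for instance (volume name as in the run example above, `alpine` chosen
+arbitrarily as a throwaway image):
+
+```console
+$ docker volume inspect crowdsec_config
+$ docker run --rm -v crowdsec_config:/etc/crowdsec alpine ls -l /etc/crowdsec
+```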
+
+# Reference
+## Environment Variables
+
+* `COLLECTIONS` - Collections to install from the [hub](https://hub.crowdsec.net/browse/#collections), separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"`
+* `SCENARIOS` - Scenarios to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e SCENARIOS="crowdsecurity/http-bad-user-agent crowdsecurity/http-xss-probing"`
+* `PARSERS` - Parsers to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e PARSERS="crowdsecurity/http-logs crowdsecurity/modsecurity"`
+* `POSTOVERFLOWS` - Postoverflows to install from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e POSTOVERFLOWS="crowdsecurity/cdn-whitelist"`
+* `CONFIG_FILE` - Configuration file (default: `/etc/crowdsec/config.yaml`): `-e CONFIG_FILE="<config_path>"`
+* `DSN` - Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"`
+* `TYPE` - [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE="<type>"`
+* `TEST_MODE` - Only test configs (default: `false`): `-e TEST_MODE="true"`
+* `TZ` - Set the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to ensure logs have a local timestamp.
+* `DISABLE_AGENT` - Disable the agent (default: `false`): `-e DISABLE_AGENT="true"`
+* `DISABLE_LOCAL_API` - Disable the local API (default: `false`): `-e DISABLE_LOCAL_API="true"`
+* `AGENT_USERNAME` - Agent username (to register if it's a LAPI, or to use if it's an agent): `-e AGENT_USERNAME="machine_id"`
+* `AGENT_PASSWORD` - Agent password (to register if it's a LAPI, or to use if it's an agent): `-e AGENT_PASSWORD="machine_password"`
+* `LOCAL_API_URL` - URL of the LAPI an agent should connect to (only used when `DISABLE_LOCAL_API` is set to `true`): `-e LOCAL_API_URL="http://lapi-address:8080"`
+* `DISABLE_ONLINE_API` - Disable the Online API registration for signal sharing (default: `false`): `-e DISABLE_ONLINE_API="true"`
+* `LEVEL_TRACE` - Trace-level (VERY verbose) on stdout (default: `false`): `-e LEVEL_TRACE="true"`
+* `LEVEL_DEBUG` - Debug-level on stdout (default: `false`): `-e LEVEL_DEBUG="true"`
+* `LEVEL_INFO` - Info-level on stdout (default: `false`): `-e LEVEL_INFO="true"`
+* `USE_TLS` - Enable TLS on the API server (default: `false`): `-e USE_TLS="true"`
+* `CERT_FILE` - TLS certificate file (default: `/etc/ssl/cert.pem`): `-e CERT_FILE="<file_path>"`
+* `KEY_FILE` - TLS key file (default: `/etc/ssl/key.pem`): `-e KEY_FILE="<file_path>"`
+* `CUSTOM_HOSTNAME` - Custom hostname for the local API (default: `localhost`): `-e CUSTOM_HOSTNAME="<name>"`
+* `DISABLE_COLLECTIONS` - Collections to remove from the [hub](https://hub.crowdsec.net/browse/#collections), separated by space: `-e DISABLE_COLLECTIONS="crowdsecurity/linux crowdsecurity/nginx"`
+* `DISABLE_PARSERS` - Parsers to remove from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e DISABLE_PARSERS="crowdsecurity/apache2-logs crowdsecurity/nginx-logs"`
+* `DISABLE_SCENARIOS` - Scenarios to remove from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e DISABLE_SCENARIOS="crowdsecurity/http-bad-user-agent crowdsecurity/http-xss-probing"`
+* `DISABLE_POSTOVERFLOWS` - Postoverflows to remove from the [hub](https://hub.crowdsec.net/browse/#configurations), separated by space: `-e DISABLE_POSTOVERFLOWS="crowdsecurity/cdn-whitelist crowdsecurity/seo-bots-whitelist"`
+* `PLUGIN_DIR` - Directory for plugins (default: `/usr/local/lib/crowdsec/plugins/`): `-e PLUGIN_DIR="<path>"`
+* `BOUNCER_KEY_<name>` - Register a bouncer with the name `<name>` and a key equal to the value of the environment variable.
+* `ENROLL_KEY` - Enroll key retrieved from [the console](https://app.crowdsec.net/) to enroll the instance.
+* `ENROLL_INSTANCE_NAME` - To set an instance name and see it on [the console](https://app.crowdsec.net/).
+* `ENROLL_TAGS` - To set tags when enrolling an instance and use them for search and filtering on [the console](https://app.crowdsec.net/).
+
+## Volumes
+
+* `/var/lib/crowdsec/data/` - Directory where all crowdsec data (databases) is located
+
+* `/etc/crowdsec/` - Directory where all crowdsec configurations are located
+
+## File Locations
+
+* `/usr/local/bin/crowdsec` - Crowdsec binary
+
+* `/usr/local/bin/cscli` - Crowdsec CLI binary to interact with crowdsec
+
+# Find Us
+
+* [GitHub](https://github.com/crowdsecurity/crowdsec)
+
+# Contributing
+
+Please read [contributing](https://docs.crowdsec.net/Crowdsec/v1/contributing/) for details on our code of conduct, and the process for submitting pull requests to us.
+
+# License
+
+This project is licensed under the MIT License - see the [LICENSE](https://github.com/crowdsecurity/crowdsec/blob/master/LICENSE) file for details.
diff --git a/docker/config.yaml b/docker/config.yaml
new file mode 100644
index 0000000..1589a31
--- /dev/null
+++ b/docker/config.yaml
@@ -0,0 +1,56 @@
+common:
+  daemonize: false
+  pid_dir: /var/run/
+  log_media: stdout
+  log_level: info
+  log_dir: /var/log/
+  working_dir: .
+config_paths:
+  config_dir: /etc/crowdsec/
+  data_dir: /var/lib/crowdsec/data/
+  simulation_path: /etc/crowdsec/simulation.yaml
+  hub_dir: /etc/crowdsec/hub/
+  index_path: /etc/crowdsec/hub/.index.json
+  notification_dir: /etc/crowdsec/notifications/
+  plugin_dir: /usr/local/lib/crowdsec/plugins/
+crowdsec_service:
+  acquisition_path: /etc/crowdsec/acquis.yaml
+  parser_routines: 1
+plugin_config:
+  user: nobody
+  group: nobody
+cscli:
+  output: human
+db_config:
+  log_level: info
+  type: sqlite
+  db_path: /var/lib/crowdsec/data/crowdsec.db
+  #user:
+  #password:
+  #db_name:
+  #host:
+  #port:
+  flush:
+    max_items: 5000
+    max_age: 7d
+api:
+  client:
+    insecure_skip_verify: false
+    credentials_path: /etc/crowdsec/local_api_credentials.yaml
+  server:
+    log_level: info
+    listen_uri: 0.0.0.0:8080
+    profiles_path: /etc/crowdsec/profiles.yaml
+    trusted_ips: # IP ranges, or IPs which can have admin API access
+      - 127.0.0.1
+      - ::1
+    online_client: # Central API credentials (to push signals and receive bad IPs)
+      #credentials_path: /etc/crowdsec/online_api_credentials.yaml
+#  tls:
+#    cert_file: /etc/crowdsec/ssl/cert.pem
+#    key_file: /etc/crowdsec/ssl/key.pem
+prometheus:
+  enabled: true
+  level: full
+  listen_addr: 0.0.0.0
+  listen_port: 6060
diff --git a/docker/docker_start.sh b/docker/docker_start.sh
new file mode 100755
index 0000000..05f1603
--- /dev/null
+++ b/docker/docker_start.sh
@@ -0,0 +1,212 @@
+#!/bin/bash
+
+# Set the crowdsec config file
+CS_CONFIG_FILE="/etc/crowdsec/config.yaml"
+if [ "$CONFIG_FILE" != "" ]; then
+    CS_CONFIG_FILE="$CONFIG_FILE"
+fi
+
+# TLS defaults
+CERT_FILE="${CERT_FILE:-/etc/ssl/cert.pem}"
+KEY_FILE="${KEY_FILE:-/etc/ssl/key.pem}"
+
+# Plugins directory default
+PLUGIN_DIR="${PLUGIN_DIR:-/usr/local/lib/crowdsec/plugins/}"
+
+# Check & prestage databases
+for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do
+    # We keep the pre-populated geoip databases in /staging instead of /var,
+    # because if the data directory is bind-mounted from the host, it will be
+    # empty and the files will be out of reach, requiring a runtime download.
+    # We link to them to save about 80Mb compared to cp/mv.
+    if [ ! -e "/var/lib/crowdsec/data/$geodb" ] && [ -e "/staging/var/lib/crowdsec/data/$geodb" ]; then
+        mkdir -p /var/lib/crowdsec/data
+        ln -s "/staging/var/lib/crowdsec/data/$geodb" /var/lib/crowdsec/data/
+    fi
+done
+
+# Check & prestage /etc/crowdsec
+if [ ! -e "/etc/crowdsec/local_api_credentials.yaml" ] && [ ! -e "/etc/crowdsec/config.yaml" ]; then
+    mkdir -p /etc/crowdsec
+    cp -r /staging/etc/* /etc/
+fi
+
+# Regenerate local agent credentials (ignore if agent is disabled)
+if [ "$DISABLE_AGENT" == "" ] ; then
+    echo "Regenerate local agent credentials"
+    cscli -c "$CS_CONFIG_FILE" machines delete "${CUSTOM_HOSTNAME:-localhost}"
+    if [ "$LOCAL_API_URL" != "" ] ; then
+        cscli -c "$CS_CONFIG_FILE" machines add "${CUSTOM_HOSTNAME:-localhost}" --auto --url "$LOCAL_API_URL"
+    else
+        cscli -c "$CS_CONFIG_FILE" machines add "${CUSTOM_HOSTNAME:-localhost}" --auto
+    fi
+    if [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] && [ "$LOCAL_API_URL" != "" ] ; then
+        echo "set up lapi credentials for agent"
+        CONFIG_PATH=$(yq eval '.api.client.credentials_path' "$CS_CONFIG_FILE" )
+        echo "url: $LOCAL_API_URL" > "$CONFIG_PATH"
+        echo "login: $AGENT_USERNAME" >> "$CONFIG_PATH"
+        echo "password: $AGENT_PASSWORD" >> "$CONFIG_PATH"
+    fi
+fi
+
+# Check if the LAPI needs to automatically register an agent
+echo "Check if the LAPI needs to automatically register an agent"
+if [ "$DISABLE_LOCAL_API" == "" ] && [ "$AGENT_USERNAME" != "" ] && [ "$AGENT_PASSWORD" != "" ] ; then
+    if [ "$LOCAL_API_URL" != "" ] ; then
+        cscli -c "$CS_CONFIG_FILE" machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD" --url "$LOCAL_API_URL"
+    else
+        cscli -c "$CS_CONFIG_FILE" machines add "$AGENT_USERNAME" --password "$AGENT_PASSWORD"
+    fi
+    echo "Agent registered to lapi"
+fi
+
+# Registration to online API for signal push
+if [ "${DISABLE_ONLINE_API,,}" != "true" ] && [ "$CONFIG_FILE" == "" ] ; then
+    CONFIG_EXIST=$(yq eval '.api.server.online_client | has("credentials_path")' "$CS_CONFIG_FILE")
+    if [ "$CONFIG_EXIST" != "true" ]; then
+        yq eval '.api.server.online_client = {"credentials_path": "/etc/crowdsec/online_api_credentials.yaml"}' "$CS_CONFIG_FILE" > /etc/crowdsec/config2.yaml
+        mv /etc/crowdsec/config2.yaml "$CS_CONFIG_FILE"
+        cscli -c "$CS_CONFIG_FILE" capi register > /etc/crowdsec/online_api_credentials.yaml
+        echo "registration to online API done"
+    fi
+fi
+
+## Enroll instance if enroll key is provided
+if [ "${DISABLE_ONLINE_API,,}" != "true" ] && [ "$ENROLL_KEY" != "" ] ; then
+    enroll_args=""
+    if [ "$ENROLL_INSTANCE_NAME" != "" ] ; then
+        enroll_args="--name $ENROLL_INSTANCE_NAME"
+    fi
+    if [ "$ENROLL_TAGS" != "" ] ; then
+        #shellcheck disable=SC2086
+        for tag in ${ENROLL_TAGS}
+        do
+            enroll_args="$enroll_args --tags $tag"
+        done
+    fi
+    #shellcheck disable=SC2086
+    cscli console enroll $enroll_args "$ENROLL_KEY"
+fi
+
+# crowdsec sqlite database permissions
+if [ "$GID" != "" ]; then
+    IS_SQLITE=$(yq eval '.db_config.type == "sqlite"' "$CS_CONFIG_FILE")
+    DB_PATH=$(yq eval '.db_config.db_path' "$CS_CONFIG_FILE")
+    if [ "$IS_SQLITE" == "true" ]; then
+        chown ":$GID" "$DB_PATH"
+        echo "sqlite database permissions updated"
+    fi
+fi
+
+if [ "${USE_TLS,,}" == "true" ]; then
+    yq -i eval
".api.server.tls.cert_file = \"$CERT_FILE\"" "$CS_CONFIG_FILE" + yq -i eval ".api.server.tls.key_file = \"$KEY_FILE\"" "$CS_CONFIG_FILE" + yq -i eval '... comments=""' "$CS_CONFIG_FILE" +fi + +if [ "$PLUGIN_DIR" != "/usr/local/lib/crowdsec/plugins/" ]; then + yq -i eval ".config_paths.plugin_dir = \"$PLUGIN_DIR\"" "$CS_CONFIG_FILE" +fi + +## Install collections, parsers, scenarios & postoverflows +cscli -c "$CS_CONFIG_FILE" hub update +cscli -c "$CS_CONFIG_FILE" collections upgrade crowdsecurity/linux || true +cscli -c "$CS_CONFIG_FILE" parsers upgrade crowdsecurity/whitelists || true +cscli -c "$CS_CONFIG_FILE" parsers install crowdsecurity/docker-logs || true +if [ "$COLLECTIONS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" collections install $COLLECTIONS +fi +if [ "$PARSERS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" parsers install $PARSERS +fi +if [ "$SCENARIOS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" scenarios install $SCENARIOS +fi +if [ "$POSTOVERFLOWS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" postoverflows install $POSTOVERFLOWS +fi + +## Remove collections, parsers, scenarios & postoverflows +if [ "$DISABLE_COLLECTIONS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" collections remove $DISABLE_COLLECTIONS +fi +if [ "$DISABLE_PARSERS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" parsers remove $DISABLE_PARSERS +fi +if [ "$DISABLE_SCENARIOS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" scenarios remove $DISABLE_SCENARIOS +fi +if [ "$DISABLE_POSTOVERFLOWS" != "" ]; then + #shellcheck disable=SC2086 + cscli -c "$CS_CONFIG_FILE" postoverflows remove $DISABLE_POSTOVERFLOWS +fi + +function register_bouncer { + if ! 
cscli -c "$CS_CONFIG_FILE" bouncers list -o json | sed '/^ *"name"/!d;s/^ *"name": "\(.*\)",/\1/' | grep -q "^${NAME}$"; then + if cscli -c "$CS_CONFIG_FILE" bouncers add "${NAME}" -k "${KEY}" > /dev/null; then + echo "Registered bouncer for ${NAME}" + else + echo "Failed to register bouncer for ${NAME}" + fi + fi +} + +## Register bouncers via env +for BOUNCER in $(compgen -A variable | grep -i BOUNCER_KEY); do + KEY=$(printf '%s' "${!BOUNCER}") + NAME=$(printf '%s' "$BOUNCER" | cut -d_ -f2-) + if [[ -n $KEY ]] && [[ -n $NAME ]]; then + register_bouncer + fi +done + +## Register bouncers via secrets +shopt -s nullglob extglob +for BOUNCER in /run/secrets/@(bouncer_key|BOUNCER_KEY)* ; do + KEY=$(cat "${BOUNCER}") + NAME=$(echo "${BOUNCER}" | awk -F "/" '{printf $NF}' | cut -d_ -f2-) + if [[ -n $KEY ]] && [[ -n $NAME ]]; then + register_bouncer + fi +done +shopt -u nullglob extglob + +ARGS="" +if [ "$CONFIG_FILE" != "" ]; then + ARGS="-c $CONFIG_FILE" +fi +if [ "$DSN" != "" ]; then + ARGS="$ARGS -dsn ${DSN}" +fi + +if [ "$TYPE" != "" ]; then + ARGS="$ARGS -type $TYPE" +fi +if [ "${TEST_MODE,,}" == "true" ]; then + ARGS="$ARGS -t" +fi +if [ "${DISABLE_AGENT,,}" == "true" ]; then + ARGS="$ARGS -no-cs" +fi +if [ "${DISABLE_LOCAL_API,,}" == "true" ]; then + ARGS="$ARGS -no-api" +fi +if [ "${LEVEL_TRACE,,}" == "true" ]; then + ARGS="$ARGS -trace" +fi +if [ "${LEVEL_DEBUG,,}" == "true" ]; then + ARGS="$ARGS -debug" +fi +if [ "${LEVEL_INFO,,}" == "true" ]; then + ARGS="$ARGS -info" +fi + +#shellcheck disable=SC2086 +exec crowdsec $ARGS diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a1c4ed1 --- /dev/null +++ b/go.mod @@ -0,0 +1,177 @@ +module github.com/crowdsecurity/crowdsec + +go 1.19 + +require ( + entgo.io/ent v0.11.3 + github.com/AlecAivazis/survey/v2 v2.2.7 + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/alexliesenfeld/health v0.5.1 + github.com/antonmedv/expr v1.9.0 + github.com/appleboy/gin-jwt/v2 v2.8.0 + github.com/aws/aws-sdk-go v1.42.25 + github.com/buger/jsonparser v1.1.1 + github.com/c-robinson/iplib v1.0.3 + github.com/confluentinc/bincover v0.2.0 + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 + github.com/crowdsecurity/grokky v0.1.0 + github.com/crowdsecurity/machineid v1.0.2 + github.com/davecgh/go-spew v1.1.1 + github.com/dghubble/sling v1.3.0 + github.com/docker/docker v20.10.2+incompatible + github.com/docker/go-connections v0.4.0 + github.com/enescakir/emoji v1.0.0 + github.com/fatih/color v1.13.0 + github.com/fsnotify/fsnotify v1.5.1 + github.com/gin-gonic/gin v1.7.7 + github.com/go-co-op/gocron v1.17.0 + github.com/go-openapi/errors v0.20.1 + github.com/go-openapi/strfmt v0.19.11 + github.com/go-openapi/swag v0.19.12 + github.com/go-openapi/validate v0.20.0 + github.com/go-sql-driver/mysql v1.6.0 + github.com/google/go-querystring v1.0.0 + github.com/google/uuid v1.3.0 + github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e + github.com/hashicorp/go-hclog v1.0.0 + github.com/hashicorp/go-plugin v1.4.2 + github.com/hashicorp/go-version v1.2.1 + github.com/jackc/pgx/v4 v4.14.1 + github.com/jarcoal/httpmock v1.1.0 + github.com/jszwec/csvutil v1.5.1 + github.com/lib/pq v1.10.7 + github.com/mattn/go-sqlite3 v1.14.15 + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/nxadm/tail v1.4.6 + github.com/oschwald/geoip2-golang v1.4.0 + github.com/oschwald/maxminddb-golang v1.8.0 + github.com/pkg/errors v0.9.1 + 
github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/prom2json v1.3.0 + github.com/r3labs/diff/v2 v2.14.1 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/cobra v1.5.0 + github.com/stretchr/testify v1.8.0 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 + google.golang.org/grpc v1.45.0 + google.golang.org/protobuf v1.28.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 + gopkg.in/yaml.v2 v2.4.0 + gotest.tools/v3 v3.0.3 +) + +require ( + github.com/Masterminds/sprig/v3 v3.2.2 + github.com/aquasecurity/table v1.8.0 + github.com/beevik/etree v1.1.0 + github.com/blackfireio/osinfo v1.0.3 + github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b + github.com/ivanpirog/coloredcobra v1.0.1 + github.com/mattn/go-isatty v0.0.14 + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 + github.com/segmentio/kafka-go v0.4.34 + github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f +) + +require ( + ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/containerd/containerd v1.6.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/creack/pty v1.1.11 // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-openapi/analysis v0.19.16 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/loads v0.20.0 // indirect + github.com/go-openapi/runtime v0.19.24 // indirect + github.com/go-openapi/spec v0.20.0 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-playground/validator/v10 v10.10.0 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang/glog v0.0.0-20210429001901-424d2337a529 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.7 // indirect + github.com/gorilla/mux v1.7.3 // indirect + github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.10.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + 
github.com/jackc/pgproto3/v2 v2.2.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.9.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.15.7 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.30.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tidwall/gjson v1.13.0 // indirect + github.com/ugorji/go/codec v1.2.6 // indirect + github.com/vjeantet/grok v1.0.1 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/zclconf/go-cty v1.10.0 // indirect + go.mongodb.org/mongo-driver v1.9.0 // indirect + golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..152b73d --- /dev/null +++ b/go.sum @@ -0,0 +1,1180 @@ +ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a h1:6/nt4DODfgxzHTTg3tYy7YkVzruGQGZ/kRvXpA45KUo= +ariga.io/atlas v0.7.2-0.20220927111110-867ee0cca56a/go.mod h1:ft47uSh5hWGDCmQC9DsztZg6Xk+KagM5Ts/mZYKb9JE= +bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= +bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +entgo.io/ent v0.11.3 h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc= +entgo.io/ent v0.11.3/go.mod h1:mvDhvynOzAsOe7anH7ynPPtMjA/eeXP96kAfweevyxc= +github.com/AlecAivazis/survey/v2 v2.2.7 h1:5NbxkF4RSKmpywYdcRgUmos1o+roJY8duCLZXbVjoig= +github.com/AlecAivazis/survey/v2 v2.2.7/go.mod h1:9DYvHgXtiXm6nCn+jXnOXLKbH+Yo9u8fAS/SduGdoPk= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:pzStYMLAXM7CNQjS/Wn+zK9MUxDhSUNfVvnHsyQyjs0= +github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:ilK+u7u1HoqaDk0mjhh27QJB7PyWMreGffEvOCoEKiY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexliesenfeld/health v0.5.1 h1:cohQdtQbJdA6bj0aMD4gdXA9xQyvh9NxWO9XLGYTYcY= +github.com/alexliesenfeld/health v0.5.1/go.mod h1:N4NDIeQtlWumG+6z1ne1v62eQxktz5ylEgGgH9emdMw= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.9.0 h1:j4HI3NHEdgDnN9p6oI6Ndr0G5QryMY0FNxT4ONrFDGU= +github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= 
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/appleboy/gin-jwt/v2 v2.8.0 h1:Glo7cb9eBR+hj8Y7WzgfkOlqCaNLjP+RV4dNO3fpdps= +github.com/appleboy/gin-jwt/v2 v2.8.0/go.mod h1:KsK7E8HTvRg3vOiumTsr/ntNTHbZ3IbHLe4Eto31p7k= +github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= +github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= +github.com/aquasecurity/table v1.8.0/go.mod h1:eqOmvjjB7AhXFgFqpJUEE/ietg7RrMSJZXyTN8E/wZw= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.42.25 h1:BbdvHAi+t9LRiaYUyd53noq9jcaAcfzOhSVbKfr6Avs= +github.com/aws/aws-sdk-go v1.42.25/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs= +github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= +github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c= +github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU= +github.com/c-robinson/iplib v1.0.3/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/confluentinc/bincover v0.2.0 h1:WSS3MqzwJbosCLMOuF3tJ0pMpALzBfrm80Tb+/3gbQs= +github.com/confluentinc/bincover v0.2.0/go.mod h1:qeI1wx0RxdGTZtrJY0HVlgJ4NqC/X2Z+fHbvy87tgHE= +github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU= +github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/staticfile v0.1.3/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= +github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= +github.com/crowdsecurity/grokky v0.1.0 h1:jLUzZd3vKxYrM4hQ8n5HWLfvs5ag4UP08eT9OTekI4U= +github.com/crowdsecurity/grokky v0.1.0/go.mod h1:fx5UYUYAFIrOUNAkFCUOM2wJcsp9EWSQE9R0/9kaFJg= +github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= +github.com/crowdsecurity/machineid v1.0.2/go.mod h1:XWUSlnS0R0+u/JK5ulidwlbceNT3ZOCKteoVQEn6Luo= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= +github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= 
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw= +github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-co-op/gocron v1.17.0 h1:IixLXsti+Qo0wMvmn6Kmjp2csk2ykpkcL+EmHmST18w= +github.com/go-co-op/gocron v1.17.0/go.mod h1:IpDBSaJOVfFw7hXZuTag3SCSkqazXBBUkbQ1m1aesBs= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16 h1:Ub9e++M8sDwtHD+S587TYi+6ANBG1NRYGZDihqk0SaY= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod 
h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0 h1:Pymw1O8zDmWeNv4kVsHd0W3cvgdp8juRa4U/U/8D/Pk= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11 h1:0+YvbNh05rmBkgztd6zHp4OCFn7Mtu30bn46NQo2ZRw= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/swag v0.17.0/go.mod 
h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.0 h1:pzutNCCBZGZlE+u8HD3JZyWdc/TVbtVwlWUp8/vgUKk= +github.com/go-openapi/validate v0.20.0/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0= +github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod 
h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v0.0.0-20210429001901-424d2337a529 h1:2voWjNECnrZRbfwXxHB1/j8wa6xdKn85B5NzgVL/pTU= +github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= 
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/logger v1.1.1/go.mod h1:BkeJZ+1FhQ+/d087r4dzojEg1u2ZX+ZqG1jTUrLM+zQ= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b h1:THwEE9J2wPxF3BZm7WjLCASMcM7ctFzqLpTsCGh7gDY= +github.com/google/winops v0.0.0-20211216095627-f0e86eb1453b/go.mod h1:ShbX8v8clPm/3chw9zHVwtW3QhrFpL8mXOwNxClt4pg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/groob/plist v0.0.0-20210519001750-9f754062e6d6/go.mod h1:itkABA+w2cw7x5nYUS/pLRef6ludkZKOigbROmCTaFw= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-plugin v1.4.2 h1:yFvG3ufXXpqiMiZx9HLcaK3XbIqQ1WJFR/F1a2CuVw0= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ivanpirog/coloredcobra v1.0.1 h1:aURSdEmlR90/tSiWS0dMjdwOvCVUeYLfltLfbgNxrN4= +github.com/ivanpirog/coloredcobra v1.0.1/go.mod h1:iho4nEKcnwZFiniGSdcgdvRgZNjxm+h20acv8vqmN6Q= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= +github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 
h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= +github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= +github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= +github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= 
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jszwec/csvutil v1.5.1 h1:c3GFBhj6DFMUl4dMK3+B6rz2+LWWS/e9VJiVJ9t9kfQ= +github.com/jszwec/csvutil v1.5.1/go.mod h1:Rpu7Uu9giO9subDyMCIQfHVDuLrcaC36UA4YcJjGBkg= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.5/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term 
v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.6 h1:11TGpSHY7Esh/i/qnq02Jo5oVrI1Gue8Slbq0ujPZFQ= +github.com/nxadm/tail v1.4.6/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug= +github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= +github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= +github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk= +github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod 
h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= +github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= +github.com/r3labs/diff/v2 v2.14.1 h1:wRZ3jB44Ny50DSXsoIcFQ27l2x+n5P31K/Pk+b9B0Ic= +github.com/r3labs/diff/v2 v2.14.1/go.mod 
h1:I8noH9Fc2fjSaMxqF3G2lhDdC0b+JXCfyx85tWFM9kc= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/segmentio/kafka-go v0.4.34 h1:Dm6YlLMiVSiwwav20KY0AoY63s661FXevwJ3CVHUERo= +github.com/segmentio/kafka-go v0.4.34/go.mod h1:GAjxBQJdQMB5zfNA21AhpaqOB2Mu+w3De4ni3Gbm8y0= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.4.0/go.mod 
h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= +github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M= +github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= +github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= +github.com/vjeantet/grok v1.0.1/go.mod 
h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= +github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= +github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.9.0 h1:f3aLGJvQmBl8d9S40IL+jEyBC6hfLPbJjv9t5hEM9ck= +go.mongodb.org/mongo-driver v1.9.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= 
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220706163947-c90051bbdb60 h1:8NSylCMxLW4JvserAndSgFL7aPli6A68yf0bYFTcWCM= +golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 
h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/make_chocolatey.ps1 b/make_chocolatey.ps1 new file mode 100644 index 0000000..67f85c3 --- /dev/null +++ b/make_chocolatey.ps1 @@ -0,0 +1,18 @@ +param ( + $version +) +if ($version.StartsWith("v")) +{ + $version = $version.Substring(1) +} + +#Pre-releases will be like 1.4.0-rc1, remove everything after the dash as it does not conform to the MSI versioning scheme +if ($version.Contains("-")) +{ + $version = $version.Substring(0, $version.IndexOf("-")) +} + +Set-Location .\windows\Chocolatey\crowdsec +Copy-Item ..\..\..\crowdsec_$version.msi tools\crowdsec.msi + +choco pack --version $version \ No newline at end of file diff --git a/make_installer.ps1 b/make_installer.ps1 new file mode 100644 index 0000000..a20ffaf 
--- /dev/null
+++ b/make_installer.ps1
@@ -0,0 +1,20 @@
+param (
+    $version
+)
+$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.11\bin"
+if ($version.StartsWith("v"))
+{
+    $version = $version.Substring(1)
+}
+
+#Pre-releases will be like 1.4.0-rc1, remove everything after the dash as it does not conform to the MSI versioning scheme
+if ($version.Contains("-"))
+{
+    $version = $version.Substring(0, $version.IndexOf("-"))
+}
+
+Remove-Item -Force -Recurse -Path .\msi -ErrorAction SilentlyContinue
+#we only harvest the patterns dir, as we want to handle some yaml files in the config directory differently, and I really don't want to write XSLT filters to exclude the files :(
+heat.exe dir config\patterns -nologo -cg CrowdsecPatterns -dr PatternsDir -g1 -ag -sf -srd -scom -sreg -out "msi\fragment.wxs"
+candle.exe -arch x64 -dSourceDir=config\patterns -dVersion="$version" -out msi\ msi\fragment.wxs windows\installer\WixUI_HK.wxs windows\installer\product.wxs
+light.exe -b .\config\patterns -ext WixUIExtension -ext WixUtilExtension -sacl -spdb -out crowdsec_$version.msi msi\fragment.wixobj msi\WixUI_HK.wixobj msi\product.wixobj
\ No newline at end of file
diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go
new file mode 100644
index 0000000..00c5325
--- /dev/null
+++ b/pkg/acquisition/acquisition.go
@@ -0,0 +1,272 @@
+package acquisition
+
+import (
+    "fmt"
+    "io"
+    "os"
+    "strings"
+
+    "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
+    cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch"
+    dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker"
+    fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file"
+    journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl"
+    kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka"
+    kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis"
+    syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog"
+    wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog"
+    "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+    "github.com/crowdsecurity/crowdsec/pkg/types"
+    "github.com/pkg/errors"
+    "github.com/prometheus/client_golang/prometheus"
+    log "github.com/sirupsen/logrus"
+    "gopkg.in/yaml.v2"
+
+    tomb "gopkg.in/tomb.v2"
+)
+
+// DataSource is the interface each datasource must implement
+type DataSource interface {
+    GetMetrics() []prometheus.Collector                         // Returns pointers to metrics that are managed by the module
+    GetAggregMetrics() []prometheus.Collector                   // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality)
+    Configure([]byte, *log.Entry) error                         // Configure the datasource from a yaml acquisition item
+    ConfigureByDSN(string, map[string]string, *log.Entry) error // Configure the datasource from a DSN string
+    GetMode() string                                            // Get the mode (TAIL, CAT or SERVER)
+    GetName() string                                            // Get the name of the module
+    OneShotAcquisition(chan types.Event, *tomb.Tomb) error      // Start one-shot acquisition (e.g., cat a file)
+    StreamingAcquisition(chan types.Event, *tomb.Tomb) error    // Start live acquisition (e.g., tail a file)
+    CanRun() error                                              // Whether the datasource can run or not (e.g., journalctl makes no sense on BSD)
+    Dump() interface{}
+}
+
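+// AcquisitionSources maps each datasource name -- the value expected in the
+// 'source:' field of an acquisition item -- to a constructor returning a
+// fresh DataSource implementation.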
+var AcquisitionSources = []struct {
+    name  string
+    iface func() DataSource
+}{
+    {
+        name:  "file",
+        iface: func() DataSource { return &fileacquisition.FileSource{} },
+    },
+    {
+        name:  "journalctl",
+        iface: func() DataSource { return &journalctlacquisition.JournalCtlSource{} },
+    },
+    {
+        name:  "cloudwatch",
+        iface: func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} },
+    },
+    {
+        name:  "syslog",
+        iface: func() DataSource { return &syslogacquisition.SyslogSource{} },
+    },
+    {
+        name:  "docker",
+        iface: func() DataSource { return &dockeracquisition.DockerSource{} },
+    },
+    {
+        name:  "kinesis",
+        iface: func() DataSource { return &kinesisacquisition.KinesisSource{} },
+    },
+    {
+        name:  "wineventlog",
+        iface: func() DataSource { return &wineventlogacquisition.WinEventLogSource{} },
+    },
+    {
+        name:  "kafka",
+        iface: func() DataSource { return &kafkaacquisition.KafkaSource{} },
+    },
+}
+
+func GetDataSourceIface(dataSourceType string) DataSource {
+    for _, source := range AcquisitionSources {
+        if source.name == dataSourceType {
+            return source.iface()
+        }
+    }
+    return nil
+}
+
+func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataSource, error) {
+
+    //we dump it back to []byte because we want to decode the yaml blob twice:
+    //once to DataSourceCommonCfg, and then later to the dedicated type of the datasource
+    yamlConfig, err := yaml.Marshal(commonConfig)
+    if err != nil {
+        return nil, errors.Wrap(err, "unable to marshal back interface")
+    }
+    if dataSrc := GetDataSourceIface(commonConfig.Source); dataSrc != nil {
+        /* this logger will then be used by the datasource at runtime */
+        clog := log.New()
+        if err := types.ConfigureLogger(clog); err != nil {
+            return nil, errors.Wrap(err, "while configuring datasource logger")
+        }
+        if commonConfig.LogLevel != nil {
+            clog.SetLevel(*commonConfig.LogLevel)
+        }
+        customLog := log.Fields{
+            "type": commonConfig.Source,
+        }
+        if commonConfig.Name != "" {
+            customLog["name"] = commonConfig.Name
+        }
+        subLogger := clog.WithFields(customLog)
+        /* check that any dependencies are satisfied (i.e. journald will check journalctl availability) */
+        if err := dataSrc.CanRun(); err != nil {
+            return nil, errors.Wrapf(err, "datasource %s cannot be run", commonConfig.Source)
+        }
+        /* configure the actual datasource */
+        if err := dataSrc.Configure(yamlConfig, subLogger); err != nil {
+            return nil, errors.Wrapf(err, "failed to configure datasource %s", commonConfig.Source)
+        }
+        return &dataSrc, nil
+    }
+    return nil, fmt.Errorf("cannot find source %s", commonConfig.Source)
+}
+
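+// For illustration, an acquisition item handled by DataSourceConfigure above
+// could look like this (field values are examples only):
+//
+//    source: file
+//    filenames:
+//      - /var/log/auth.log
+//    labels:
+//      type: syslog
+//    mode: tail
+//
+// The common fields (source, mode, name, labels, log_level) are decoded into
+// DataSourceCommonCfg first; the blob is then re-marshaled and handed to the
+// module's Configure() so it can decode its own fields (here, filenames).
+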
+//detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory back then)
+func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string {
+
+    if _, ok := sub.Config["filename"]; ok {
+        return "file"
+    }
+    if _, ok := sub.Config["filenames"]; ok {
+        return "file"
+    }
+    if _, ok := sub.Config["journalctl_filter"]; ok {
+        return "journalctl"
+    }
+    return ""
+}
+
+func LoadAcquisitionFromDSN(dsn string, labels map[string]string) ([]DataSource, error) {
+    var sources []DataSource
+
+    frags := strings.Split(dsn, ":")
+    if len(frags) == 1 {
+        return nil, fmt.Errorf("%s isn't a valid dsn (no protocol)", dsn)
+    }
+    dataSrc := GetDataSourceIface(frags[0])
+    if dataSrc == nil {
+        return nil, fmt.Errorf("no acquisition for protocol %s://", frags[0])
+    }
+    /* this logger will then be used by the datasource at runtime */
+    clog := log.New()
+    if err := types.ConfigureLogger(clog); err != nil {
+        return nil, errors.Wrap(err, "while configuring datasource logger")
+    }
+    subLogger := clog.WithFields(log.Fields{
+        "type": dsn,
+    })
+    err := dataSrc.ConfigureByDSN(dsn, labels, subLogger)
+    if err != nil {
+        return nil, errors.Wrapf(err, "while configuring datasource for %s", dsn)
+    }
+    sources = append(sources, dataSrc)
+    return sources, nil
+}
+
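+// A DSN is of the form "<protocol>:<rest>": the protocol part selects the
+// datasource module, and the rest is interpreted by that module's
+// ConfigureByDSN(). An illustrative example (exact syntax is module-specific):
+//
+//    file:///var/log/auth.log
+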
+// LoadAcquisitionFromFile unmarshals the configuration item and checks its availability
+func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, error) {
+
+    var sources []DataSource
+
+    for _, acquisFile := range config.AcquisitionFiles {
+        log.Infof("loading acquisition file: %s", acquisFile)
+        yamlFile, err := os.Open(acquisFile)
+        if err != nil {
+            return nil, err
+        }
+        dec := yaml.NewDecoder(yamlFile)
+        dec.SetStrict(true)
+        var idx int
+        for {
+            var sub configuration.DataSourceCommonCfg
+            err = dec.Decode(&sub)
+            if err != nil {
+                if err == io.EOF {
+                    log.Tracef("End of yaml file")
+                    break
+                }
+                return nil, errors.Wrapf(err, "failed to yaml decode %s", acquisFile)
+            }
+
+            //for backward compat ('type' was not mandatory, detect it)
+            if guessType := detectBackwardCompatAcquis(sub); guessType != "" {
+                sub.Source = guessType
+            }
+            //it's an empty item, skip it
+            if len(sub.Labels) == 0 {
+                if sub.Source == "" {
+                    log.Debugf("skipping empty item in %s", acquisFile)
+                    idx += 1
+                    continue
+                }
+                return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx)
+            }
+            if sub.Source == "" {
+                return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx)
+            }
+            if GetDataSourceIface(sub.Source) == nil {
+                return nil, fmt.Errorf("unknown data source %s in %s (position: %d)", sub.Source, acquisFile, idx)
+            }
+            src, err := DataSourceConfigure(sub)
+            if err != nil {
+                return nil, errors.Wrapf(err, "while configuring datasource of type %s from %s (position: %d)", sub.Source, acquisFile, idx)
+            }
+            sources = append(sources, *src)
+            idx += 1
+        }
+    }
+    return sources, nil
+}
+
+func GetMetrics(sources []DataSource, aggregated bool) error {
+    var metrics []prometheus.Collector
+    for i := 0; i < len(sources); i++ {
+        if aggregated {
+            metrics = sources[i].GetMetrics()
+        } else {
+            metrics = sources[i].GetAggregMetrics()
+        }
+        for _, metric := range metrics {
+            if err := prometheus.Register(metric); err != nil {
+                if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
+                    return errors.Wrapf(err, "could not register metrics for datasource %s", sources[i].GetName())
+                }
+                //an AlreadyRegisteredError is expected when several sources of the same type register the same collector; ignore it
+            }
+        }
+    }
+    return nil
+}
+
+func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error {
+    for i := 0; i < len(sources); i++ {
+        subsrc := sources[i] //ensure it's a copy: the goroutine below captures it
+        log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc)
+
+        AcquisTomb.Go(func() error {
+            defer types.CatchPanic("crowdsec/acquis")
+            var err error
+            if subsrc.GetMode() == configuration.TAIL_MODE {
+                err = subsrc.StreamingAcquisition(output, AcquisTomb)
+            } else {
+                err = subsrc.OneShotAcquisition(output, AcquisTomb)
+            }
+            if err != nil {
+                //if one of the acquisition sources returns an error, we kill the others to shut down properly
+                AcquisTomb.Kill(err)
+            }
+            return nil
+        })
+    }
+    // Don't wait if we have no sources, as it would hang forever
+    if len(sources) > 0 {
+        /*return only when acquisition is over (cat) or never (tail)*/
+        err := AcquisTomb.Wait()
+        return err
+    }
+    return nil
+}
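+
+// Minimal wiring sketch for the API above (illustrative; error handling is
+// elided and the `cfg` variable is assumed to exist):
+//
+//    sources, _ := LoadAcquisitionFromFile(cfg) // or LoadAcquisitionFromDSN(...)
+//    output := make(chan types.Event)
+//    var acquisTomb tomb.Tomb
+//    go func() { _ = StartAcquisition(sources, output, &acquisTomb) }()
+//    for evt := range output {
+//        // hand evt to the parsing pipeline
+//    }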
diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go
new file mode 100644
index 0000000..a547970
--- /dev/null
+++ b/pkg/acquisition/acquisition_test.go
@@ -0,0 +1,546 @@
+package acquisition
+
+import (
+    "fmt"
+    "strings"
+    "testing"
+    "time"
+
+    "github.com/pkg/errors"
+    "github.com/prometheus/client_golang/prometheus"
+    log "github.com/sirupsen/logrus"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    tomb "gopkg.in/tomb.v2"
+    "gopkg.in/yaml.v2"
+
+    "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration"
+    "github.com/crowdsecurity/crowdsec/pkg/csconfig"
+    "github.com/crowdsecurity/crowdsec/pkg/cstest"
+    "github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+type MockSource struct {
+    configuration.DataSourceCommonCfg `yaml:",inline"`
+    Toto                              string `yaml:"toto"`
+    logger                            *log.Entry
+}
+
+func (f *MockSource) Configure(cfg []byte, logger *log.Entry) error {
+    f.logger = logger
+    if err := yaml.UnmarshalStrict(cfg, &f); err != nil {
+        return errors.Wrap(err, "while unmarshaling to reader-specific config")
+    }
+    if f.Mode == "" {
+        f.Mode = configuration.CAT_MODE
+    }
+    if f.Mode != configuration.CAT_MODE && f.Mode != configuration.TAIL_MODE {
+        return fmt.Errorf("mode %s is not supported", f.Mode)
+    }
+    if f.Toto == "" {
+        return fmt.Errorf("expect non-empty toto")
+    }
+    return nil
+}
+func (f *MockSource) GetMode() string                                         { return f.Mode }
+func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error   { return nil }
+func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil }
+func (f *MockSource) CanRun() error                                           { return nil }
+func (f *MockSource) GetMetrics() []prometheus.Collector                      { return nil }
+func (f *MockSource) GetAggregMetrics() []prometheus.Collector                { return nil }
+func (f *MockSource) Dump() interface{}                                       { return f }
+func (f *MockSource) GetName() string                                         { return "mock" }
+func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry) error {
+    return fmt.Errorf("not supported")
+}
+
+// copy of MockSource, but this one can't run
+type MockSourceCantRun struct {
+    MockSource
+}
+
+func (f *MockSourceCantRun) CanRun() error   { return fmt.Errorf("can't run bro") }
+func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" }
+
+// appendMockSource is only used to add mock sources for tests
+func appendMockSource() {
+    if GetDataSourceIface("mock") == nil {
+        mock := struct {
+            name  string
+            iface func() DataSource
+        }{
+            name:  "mock",
+            iface: func() DataSource { return &MockSource{} },
+        }
+        AcquisitionSources = append(AcquisitionSources, mock)
+    }
+    if GetDataSourceIface("mock_cant_run") == nil {
+        mock := struct {
+            name  string
+            iface func() DataSource
+        }{
+            name:  "mock_cant_run",
+            iface: func() DataSource { return &MockSourceCantRun{} },
+        }
+        AcquisitionSources = append(AcquisitionSources, mock)
+    }
+}
+
+func TestDataSourceConfigure(t *testing.T) {
+    appendMockSource()
+    tests := []struct {
+        TestName      string
+        String        string
+        ExpectedError string
+    }{
+        {
+            TestName: "basic_valid_config",
+            String: `
+mode: cat
+labels:
+  test: foobar
+log_level: info
+source: mock
+toto: test_value1
+`,
+        },
+        {
+            TestName: "basic_debug_config",
+            String: `
+mode: cat
+labels:
+  test: foobar
+log_level: debug
+source: mock
+toto: test_value1
+`,
+        },
+        {
+            TestName: "basic_tailmode_config",
+            String: `
+mode: tail
+labels:
+  test: foobar
+log_level: debug
+source: mock
+toto: test_value1
+`,
+        },
+        {
+            TestName: "bad_mode_config",
+            String: `
+mode: ratata
+labels:
+  test: foobar
+log_level: debug
+source: mock
+toto: test_value1
+`,
+            ExpectedError: "failed to configure datasource mock: mode ratata is not supported",
+        },
+        {
+            TestName: "bad_type_config",
+            String: `
+mode: cat
+labels:
+  test: foobar
+log_level: debug
+source: tutu
+`,
+            ExpectedError: "cannot find source tutu",
+        },
+        {
+            TestName: "mismatch_config",
+            String: `
+mode: cat
+labels:
+  test: foobar
+log_level: debug
+source: mock
+wowo: ajsajasjas
+`,
+            ExpectedError: "field wowo not found in type acquisition.MockSource",
+        },
+        {
+            TestName: "cant_run_error",
+            String: `
+mode: cat
+labels:
+  test: foobar
+log_level: debug
+source: mock_cant_run
+wowo: ajsajasjas
+`,
+            ExpectedError: "datasource mock_cant_run cannot be run: can't run bro",
+        },
+    }
+
+    for _, tc := range tests {
+        tc := tc
+        t.Run(tc.TestName, func(t *testing.T) {
+            common := configuration.DataSourceCommonCfg{}
+            yaml.Unmarshal([]byte(tc.String), &common)
+            ds, err := DataSourceConfigure(common)
+            cstest.RequireErrorContains(t, err, tc.ExpectedError)
+            if tc.ExpectedError != "" {
+                return
+            }
+
+            switch tc.TestName {
+            case "basic_valid_config":
+                mock := (*ds).Dump().(*MockSource)
+                assert.Equal(t, mock.Toto, "test_value1")
+                assert.Equal(t, mock.Mode, "cat")
+                assert.Equal(t, mock.logger.Logger.Level, log.InfoLevel)
+                assert.Equal(t, mock.Labels, map[string]string{"test": "foobar"})
+            case "basic_debug_config":
+                mock := (*ds).Dump().(*MockSource)
+                assert.Equal(t, mock.Toto, "test_value1")
+                assert.Equal(t, mock.Mode, "cat")
+                assert.Equal(t, mock.logger.Logger.Level, log.DebugLevel)
+                assert.Equal(t, mock.Labels, map[string]string{"test": "foobar"})
+            case "basic_tailmode_config":
+                mock := (*ds).Dump().(*MockSource)
+                assert.Equal(t, mock.Toto, "test_value1")
+                assert.Equal(t, mock.Mode, "tail")
+                assert.Equal(t, mock.logger.Logger.Level, log.DebugLevel)
+                assert.Equal(t, mock.Labels, map[string]string{"test": "foobar"})
+            }
+        })
+    }
+}
+
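+// TestLoadAcquisitionFromFile drives LoadAcquisitionFromFile through its
+// error paths (missing file, invalid yaml, missing labels, unknown source,
+// bad module config) as well as the nominal ones, using the fixtures under
+// test_files/.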
+ TestName: "non_existent_file", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"does_not_exist"}, + }, + ExpectedError: "open does_not_exist: " + cstest.FileNotFoundMessage, + ExpectedLen: 0, + }, + { + TestName: "invalid_yaml_file", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/badyaml.yaml"}, + }, + ExpectedError: "failed to yaml decode test_files/badyaml.yaml: yaml: unmarshal errors", + ExpectedLen: 0, + }, + { + TestName: "invalid_empty_yaml", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/emptyitem.yaml"}, + }, + ExpectedLen: 0, + }, + { + TestName: "basic_valid", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/basic_filemode.yaml"}, + }, + ExpectedLen: 2, + }, + { + TestName: "missing_labels", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/missing_labels.yaml"}, + }, + ExpectedError: "missing labels in test_files/missing_labels.yaml", + }, + { + TestName: "backward_compat", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/backward_compat.yaml"}, + }, + ExpectedLen: 2, + }, + { + TestName: "bad_type", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/bad_source.yaml"}, + }, + ExpectedError: "unknown data source does_not_exist in test_files/bad_source.yaml", + }, + { + TestName: "invalid_filetype_config", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/bad_filetype.yaml"}, + }, + ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml", + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.TestName, func(t *testing.T) { + dss, err := LoadAcquisitionFromFile(&tc.Config) + cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { + return + } + + assert.Len(t, dss, tc.ExpectedLen) + }) + + } +} + +/* + test start acquisition : + - create mock parser in cat mode : start acquisition, check it returns, count items in chan + - create mock parser in tail mode : start acquisition, sleep, check item count, tomb kill it, wait for it to return +*/ + +type MockCat struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + logger *log.Entry +} + +func (f *MockCat) Configure(cfg []byte, logger *log.Entry) error { + f.logger = logger + if f.Mode == "" { + f.Mode = configuration.CAT_MODE + } + if f.Mode != configuration.CAT_MODE { + return fmt.Errorf("mode %s is not supported", f.Mode) + } + return nil +} +func (f *MockCat) GetName() string { return "mock_cat" } +func (f *MockCat) GetMode() string { return "cat" } +func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { + for i := 0; i < 10; i++ { + evt := types.Event{} + evt.Line.Src = "test" + out <- evt + } + return nil +} +func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { + return fmt.Errorf("can't run in tail") +} +func (f *MockCat) CanRun() error { return nil } +func (f *MockCat) GetMetrics() []prometheus.Collector { return nil } +func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockCat) Dump() interface{} { return f } +func (f *MockCat) ConfigureByDSN(string, map[string]string, *log.Entry) error { + return fmt.Errorf("not supported") +} + +//---- + +type MockTail struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + logger *log.Entry +} + +func (f *MockTail) Configure(cfg []byte, logger *log.Entry) error { + f.logger = logger + if 
f.Mode == "" { + f.Mode = configuration.TAIL_MODE + } + if f.Mode != configuration.TAIL_MODE { + return fmt.Errorf("mode %s is not supported", f.Mode) + } + return nil +} +func (f *MockTail) GetName() string { return "mock_tail" } +func (f *MockTail) GetMode() string { return "tail" } +func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { + return fmt.Errorf("can't run in cat mode") +} +func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + for i := 0; i < 10; i++ { + evt := types.Event{} + evt.Line.Src = "test" + out <- evt + } + <-t.Dying() + return nil +} +func (f *MockTail) CanRun() error { return nil } +func (f *MockTail) GetMetrics() []prometheus.Collector { return nil } +func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockTail) Dump() interface{} { return f } +func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry) error { + return fmt.Errorf("not supported") +} + +//func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { + +func TestStartAcquisitionCat(t *testing.T) { + sources := []DataSource{ + &MockCat{}, + } + out := make(chan types.Event) + acquisTomb := tomb.Tomb{} + + go func() { + if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + t.Errorf("unexpected error") + } + }() + + count := 0 +READLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + + assert.Equal(t, 10, count) +} + +func TestStartAcquisitionTail(t *testing.T) { + sources := []DataSource{ + &MockTail{}, + } + out := make(chan types.Event) + acquisTomb := tomb.Tomb{} + + go func() { + if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + t.Errorf("unexpected error") + } + }() + + count := 0 +READLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + + assert.Equal(t, 10, count) + + acquisTomb.Kill(nil) + time.Sleep(1 * time.Second) + require.NoError(t, acquisTomb.Err(), "tomb is not dead") +} + +type MockTailError struct { + MockTail +} + +func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + for i := 0; i < 10; i++ { + evt := types.Event{} + evt.Line.Src = "test" + out <- evt + } + t.Kill(fmt.Errorf("got error (tomb)")) + return fmt.Errorf("got error") +} + +func TestStartAcquisitionTailError(t *testing.T) { + sources := []DataSource{ + &MockTailError{}, + } + out := make(chan types.Event) + acquisTomb := tomb.Tomb{} + + go func() { + if err := StartAcquisition(sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" { + t.Errorf("expected error, got '%s'", err) + } + }() + + count := 0 +READLOOP: + for { + select { + case <-out: + count++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + assert.Equal(t, 10, count) + //acquisTomb.Kill(nil) + time.Sleep(1 * time.Second) + cstest.RequireErrorContains(t, acquisTomb.Err(), "got error (tomb)") +} + +type MockSourceByDSN struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + Toto string `yaml:"toto"` + logger *log.Entry //nolint: unused +} + +func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry) error { return nil } +func (f *MockSourceByDSN) GetMode() string { return f.Mode } +func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } 
+func (f *MockSourceByDSN) CanRun() error { return nil } +func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) Dump() interface{} { return f } +func (f *MockSourceByDSN) GetName() string { return "mockdsn" } +func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + dsn = strings.TrimPrefix(dsn, "mockdsn://") + if dsn != "test_expect" { + return fmt.Errorf("unexpected value") + } + return nil +} + +func TestConfigureByDSN(t *testing.T) { + tests := []struct { + dsn string + ExpectedError string + ExpectedResLen int + }{ + { + dsn: "baddsn", + ExpectedError: "baddsn isn't valid dsn (no protocol)", + }, + { + dsn: "foobar://toto", + ExpectedError: "no acquisition for protocol foobar://", + }, + { + dsn: "mockdsn://test_expect", + ExpectedResLen: 1, + }, + { + dsn: "mockdsn://bad", + ExpectedError: "unexpected value", + }, + } + + if GetDataSourceIface("mockdsn") == nil { + mock := struct { + name string + iface func() DataSource + }{ + name: "mockdsn", + iface: func() DataSource { return &MockSourceByDSN{} }, + } + AcquisitionSources = append(AcquisitionSources, mock) + } + + for _, tc := range tests { + tc := tc + t.Run(tc.dsn, func(t *testing.T) { + srcs, err := LoadAcquisitionFromDSN(tc.dsn, map[string]string{"type": "test_label"}) + cstest.RequireErrorContains(t, err, tc.ExpectedError) + + assert.Len(t, srcs, tc.ExpectedResLen) + }) + } +} diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go new file mode 100644 index 0000000..41d31ef --- /dev/null +++ b/pkg/acquisition/configuration/configuration.go @@ -0,0 +1,19 @@ +package configuration + +import ( + log "github.com/sirupsen/logrus" +) + +type DataSourceCommonCfg struct { + Mode string `yaml:"mode,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + LogLevel *log.Level `yaml:"log_level,omitempty"` + Source string `yaml:"source,omitempty"` + Name string `yaml:"name,omitempty"` + UseTimeMachine bool `yaml:"use_time_machine,omitempty"` + Config map[string]interface{} `yaml:",inline"` //to keep the datasource-specific configuration directives +} + +var TAIL_MODE = "tail" +var CAT_MODE = "cat" +var SERVER_MODE = "server" // No difference with tail, just a bit more verbose diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go new file mode 100644 index 0000000..9a3a66d --- /dev/null +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -0,0 +1,686 @@ +package cloudwatchacquisition + +import ( + "context" + "fmt" + "net/url" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" + + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +var openedStreams = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cs_cloudwatch_openstreams_total", + Help: "Number of opened stream within group.", + }, + []string{"group"}, +) + +var streamIndexMutex = sync.Mutex{} + +var linesRead = 
prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Name: "cs_cloudwatch_stream_hits_total",
+        Help: "Number of events read from stream.",
+    },
+    []string{"group", "stream"},
+)
+
+//CloudwatchSource is the runtime instance keeping track of N streams within 1 cloudwatch group
+type CloudwatchSource struct {
+    Config CloudwatchSourceConfiguration
+    /*runtime stuff*/
+    logger           *log.Entry
+    t                *tomb.Tomb
+    cwClient         *cloudwatchlogs.CloudWatchLogs
+    monitoredStreams []*LogStreamTailConfig
+    streamIndexes    map[string]string
+}
+
+//CloudwatchSourceConfiguration allows the user to define one or more streams to monitor within a cloudwatch log group
+type CloudwatchSourceConfiguration struct {
+    configuration.DataSourceCommonCfg `yaml:",inline"`
+    GroupName string `yaml:"group_name"` //the group name to be monitored
+    StreamRegexp *string `yaml:"stream_regexp,omitempty"` //allows filtering for specific streams
+    StreamName *string `yaml:"stream_name,omitempty"`
+    StartTime, EndTime *time.Time `yaml:"-"`
+    DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` //batch size for DescribeLogStreamsPagesWithContext
+    GetLogEventsPagesLimit *int64 `yaml:"getlogeventspages_limit,omitempty"`
+    PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` //frequency at which we poll for new streams within the log group
+    MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` //monitor only streams that have been updated within $duration
+    PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` //frequency at which we poll each stream
+    StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` //stop monitoring streams that haven't been updated within $duration; they might be reopened later though
+    AwsApiCallTimeout *time.Duration `yaml:"aws_api_timeout,omitempty"`
+    AwsProfile *string `yaml:"aws_profile,omitempty"`
+    PrependCloudwatchTimestamp *bool `yaml:"prepend_cloudwatch_timestamp,omitempty"`
+    AwsConfigDir *string `yaml:"aws_config_dir,omitempty"`
+    AwsRegion *string `yaml:"aws_region,omitempty"`
+}
+
+//LogStreamTailConfig is the configuration for one given stream within one group
+type LogStreamTailConfig struct {
+    GroupName                  string
+    StreamName                 string
+    GetLogEventsPagesLimit     int64
+    PollStreamInterval         time.Duration
+    StreamReadTimeout          time.Duration
+    PrependCloudwatchTimestamp *bool
+    Labels                     map[string]string
+    logger                     *log.Entry
+    ExpectMode                 int
+    t                          tomb.Tomb
+    StartTime, EndTime         time.Time //only used for CatMode
+}
+
+var (
+    def_DescribeLogStreamsLimit = int64(50)
+    def_PollNewStreamInterval   = 10 * time.Second
+    def_MaxStreamAge            = 5 * time.Minute
+    def_PollStreamInterval      = 10 * time.Second
+    def_AwsApiCallTimeout       = 10 * time.Second
+    def_StreamReadTimeout       = 10 * time.Minute
+    def_PollDeadStreamInterval  = 10 * time.Second
+    def_GetLogEventsPagesLimit  = int64(1000)
+    def_AwsConfigDir            = ""
+)
+
+func (cw *CloudwatchSource) Configure(cfg []byte, logger *log.Entry) error {
+    cwConfig := CloudwatchSourceConfiguration{}
+    targetStream := "*"
+    if err := yaml.UnmarshalStrict(cfg, &cwConfig); err != nil {
+        return errors.Wrap(err, "Cannot parse CloudwatchSource configuration")
+    }
+    cw.Config = cwConfig
+    if len(cw.Config.GroupName) == 0 {
+        return fmt.Errorf("group_name is mandatory for CloudwatchSource")
+    }
+    cw.logger = logger.WithField("group", cw.Config.GroupName)
+    if cw.Config.Mode == "" {
+        cw.Config.Mode = configuration.TAIL_MODE
+    }
+    logger.Debugf("Starting configuration for Cloudwatch group %s", cw.Config.GroupName)
+
+    if
cw.Config.DescribeLogStreamsLimit == nil { + cw.Config.DescribeLogStreamsLimit = &def_DescribeLogStreamsLimit + } + logger.Tracef("describelogstreams_limit set to %d", *cw.Config.DescribeLogStreamsLimit) + if cw.Config.PollNewStreamInterval == nil { + cw.Config.PollNewStreamInterval = &def_PollNewStreamInterval + } + logger.Tracef("poll_new_stream_interval set to %v", *cw.Config.PollNewStreamInterval) + if cw.Config.MaxStreamAge == nil { + cw.Config.MaxStreamAge = &def_MaxStreamAge + } + logger.Tracef("max_stream_age set to %v", *cw.Config.MaxStreamAge) + if cw.Config.PollStreamInterval == nil { + cw.Config.PollStreamInterval = &def_PollStreamInterval + } + logger.Tracef("poll_stream_interval set to %v", *cw.Config.PollStreamInterval) + if cw.Config.StreamReadTimeout == nil { + cw.Config.StreamReadTimeout = &def_StreamReadTimeout + } + logger.Tracef("stream_read_timeout set to %v", *cw.Config.StreamReadTimeout) + if cw.Config.GetLogEventsPagesLimit == nil { + cw.Config.GetLogEventsPagesLimit = &def_GetLogEventsPagesLimit + } + logger.Tracef("getlogeventspages_limit set to %v", *cw.Config.GetLogEventsPagesLimit) + if cw.Config.AwsApiCallTimeout == nil { + cw.Config.AwsApiCallTimeout = &def_AwsApiCallTimeout + } + logger.Tracef("aws_api_timeout set to %v", *cw.Config.AwsApiCallTimeout) + if *cw.Config.MaxStreamAge > *cw.Config.StreamReadTimeout { + logger.Warningf("max_stream_age > stream_read_timeout, stream might keep being opened/closed") + } + if cw.Config.AwsConfigDir == nil { + cw.Config.AwsConfigDir = &def_AwsConfigDir + } + logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir) + if *cw.Config.AwsConfigDir != "" { + _, err := os.Stat(*cw.Config.AwsConfigDir) + if err != nil { + logger.Errorf("can't read aws_config_dir '%s' got err %s", *cw.Config.AwsConfigDir, err) + return fmt.Errorf("can't read aws_config_dir %s got err %s ", *cw.Config.AwsConfigDir, err) + } + os.Setenv("AWS_SDK_LOAD_CONFIG", "1") + //as aws sdk relies on $HOME, let's allow the user to override it :) + os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir)) + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir)) + } else { + if cw.Config.AwsRegion == nil { + logger.Errorf("aws_region is not specified, specify it or aws_config_dir") + return fmt.Errorf("aws_region is not specified, specify it or aws_config_dir") + } + os.Setenv("AWS_REGION", *cw.Config.AwsRegion) + } + + if err := cw.newClient(); err != nil { + return err + } + cw.streamIndexes = make(map[string]string) + if cw.Config.StreamRegexp != nil { + if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil { + return errors.Wrapf(err, "error while compiling regexp '%s'", *cw.Config.StreamRegexp) + } + targetStream = *cw.Config.StreamRegexp + } else if cw.Config.StreamName != nil { + targetStream = *cw.Config.StreamName + } + + logger.Infof("Adding cloudwatch group '%s' (stream:%s) to datasources", cw.Config.GroupName, targetStream) + return nil +} + +func (cw *CloudwatchSource) newClient() error { + var sess *session.Session + + if cw.Config.AwsProfile != nil { + sess = session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + Profile: *cw.Config.AwsProfile, + })) + } else { + sess = session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + } + + if sess == nil { + return fmt.Errorf("failed to create aws session") + } + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v 
!= "" { + cw.logger.Debugf("[testing] overloading endpoint with %s", v) + cw.cwClient = cloudwatchlogs.New(sess, aws.NewConfig().WithEndpoint(v)) + } else { + cw.cwClient = cloudwatchlogs.New(sess) + } + if cw.cwClient == nil { + return fmt.Errorf("failed to create cloudwatch client") + } + return nil +} + +func (cw *CloudwatchSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + cw.t = t + monitChan := make(chan LogStreamTailConfig) + t.Go(func() error { + return cw.LogStreamManager(monitChan, out) + }) + return cw.WatchLogGroupForStreams(monitChan) +} + +func (cw *CloudwatchSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead, openedStreams} +} + +func (cw *CloudwatchSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead, openedStreams} +} + +func (cw *CloudwatchSource) GetMode() string { + return cw.Config.Mode +} + +func (cw *CloudwatchSource) GetName() string { + return "cloudwatch" +} + +func (cw *CloudwatchSource) CanRun() error { + return nil +} + +func (cw *CloudwatchSource) Dump() interface{} { + return cw +} + +func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error { + cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval) + ticker := time.NewTicker(*cw.Config.PollNewStreamInterval) + var startFrom *string + + for { + select { + case <-cw.t.Dying(): + cw.logger.Infof("stopping group watch") + return nil + case <-ticker.C: + hasMoreStreams := true + startFrom = nil + for hasMoreStreams { + cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext") + + ctx := context.Background() + //there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime + err := cw.cwClient.DescribeLogStreamsPagesWithContext( + ctx, + &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String(cw.Config.GroupName), + Descending: aws.Bool(true), + NextToken: startFrom, + OrderBy: aws.String(cloudwatchlogs.OrderByLastEventTime), + Limit: cw.Config.DescribeLogStreamsLimit, + }, + func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { + cw.logger.Tracef("in helper of DescribeLogStreamsPagesWithContext") + for _, event := range page.LogStreams { + startFrom = page.NextToken + //we check if the stream has been written to recently enough to be monitored + if event.LastIngestionTime != nil { + //aws uses millisecond since the epoch + oldest := time.Now().UTC().Add(-*cw.Config.MaxStreamAge) + //TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. 
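+                                //note: the conversion below is correct as written: time.Unix(sec, nsec)
+                                //takes nanoseconds as its second argument, and int64(time.Millisecond)
+                                //is the number of nanoseconds in a millisecond (1e6), so multiplying the
+                                //cloudwatch millisecond timestamp by it yields the right nanosecond value.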
+                                LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond))
+                                if LastIngestionTime.Before(oldest) {
+                                    cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge))
+                                    hasMoreStreams = false
+                                    return false
+                                }
+                                cw.logger.Tracef("stream %s is eligible for monitoring", *event.LogStreamName)
+                                //the stream has been updated recently, check if we should monitor it
+                                var expectMode int
+                                if !cw.Config.UseTimeMachine {
+                                    expectMode = leaky.LIVE
+                                } else {
+                                    expectMode = leaky.TIMEMACHINE
+                                }
+                                monitorStream := LogStreamTailConfig{
+                                    GroupName:                  cw.Config.GroupName,
+                                    StreamName:                 *event.LogStreamName,
+                                    GetLogEventsPagesLimit:     *cw.Config.GetLogEventsPagesLimit,
+                                    PollStreamInterval:         *cw.Config.PollStreamInterval,
+                                    StreamReadTimeout:          *cw.Config.StreamReadTimeout,
+                                    PrependCloudwatchTimestamp: cw.Config.PrependCloudwatchTimestamp,
+                                    ExpectMode:                 expectMode,
+                                    Labels:                     cw.Config.Labels,
+                                }
+                                out <- monitorStream
+                            }
+                        }
+                        if lastPage {
+                            cw.logger.Tracef("reached last page")
+                            hasMoreStreams = false
+                        }
+                        return true
+                    },
+                )
+                if err != nil {
+                    newerr := errors.Wrapf(err, "while describing group %s", cw.Config.GroupName)
+                    return newerr
+                }
+                cw.logger.Tracef("after DescribeLogStreamsPagesWithContext")
+            }
+        }
+    }
+}
+
+//LogStreamManager receives the potential streams to monitor, and starts a goroutine when needed
+func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error {
+
+    cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName)
+    pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval)
+
+    for {
+        select {
+        case newStream := <-in: //nolint:govet // copylocks won't matter if the tomb is not initialized
+            shouldCreate := true
+            cw.logger.Tracef("received new stream to monitor : %s/%s", newStream.GroupName, newStream.StreamName)
+
+            if cw.Config.StreamName != nil && newStream.StreamName != *cw.Config.StreamName {
+                cw.logger.Tracef("stream %s != %s", newStream.StreamName, *cw.Config.StreamName)
+                continue
+            }
+
+            if cw.Config.StreamRegexp != nil {
+                match, err := regexp.Match(*cw.Config.StreamRegexp, []byte(newStream.StreamName))
+                if err != nil {
+                    cw.logger.Warningf("invalid regexp : %s", err)
+                } else if !match {
+                    cw.logger.Tracef("stream %s doesn't match %s", newStream.StreamName, *cw.Config.StreamRegexp)
+                    continue
+                }
+            }
+
+            for idx, stream := range cw.monitoredStreams {
+                if newStream.GroupName == stream.GroupName && newStream.StreamName == stream.StreamName {
+                    //stream exists, but is dead, remove it from list
+                    if !stream.t.Alive() {
+                        cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName)
+                        cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...)
+ openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() + break + } + shouldCreate = false + break + } + } + + //let's start watching this stream + if shouldCreate { + openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() + newStream.t = tomb.Tomb{} + newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName}) + cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) + newStream.t.Go(func() error { + return cw.TailLogStream(&newStream, outChan) + }) + cw.monitoredStreams = append(cw.monitoredStreams, &newStream) + } + case <-pollDeadStreamInterval.C: + newMonitoredStreams := cw.monitoredStreams[:0] + for idx, stream := range cw.monitoredStreams { + if !cw.monitoredStreams[idx].t.Alive() { + cw.logger.Debugf("remove dead stream %s", stream.StreamName) + openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() + } else { + newMonitoredStreams = append(newMonitoredStreams, stream) + } + } + cw.monitoredStreams = newMonitoredStreams + case <-cw.t.Dying(): + cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams)) + for idx, stream := range cw.monitoredStreams { + if cw.monitoredStreams[idx].t.Alive() { + cw.logger.Debugf("killing stream %s", stream.StreamName) + cw.monitoredStreams[idx].t.Kill(nil) + if err := cw.monitoredStreams[idx].t.Wait(); err != nil { + cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err) + } + } + } + cw.monitoredStreams = nil + cw.logger.Debugf("routine cleanup done, return") + return nil + } + } +} + +func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { + var startFrom *string + lastReadMessage := time.Now().UTC() + ticker := time.NewTicker(cfg.PollStreamInterval) + //resume at existing index if we already had + streamIndexMutex.Lock() + v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] + streamIndexMutex.Unlock() + if v != "" { + cfg.logger.Debugf("restarting on index %s", v) + startFrom = &v + } + /*during first run, we want to avoid reading any message, but just get a token. + if we don't, we might end up sending the same item several times. 
hence the 'startup' hack */ + for { + select { + case <-ticker.C: + cfg.logger.Tracef("entering loop") + hasMorePages := true + for hasMorePages { + /*for the first call, we only consume the last item*/ + cfg.logger.Tracef("calling GetLogEventsPagesWithContext") + ctx := context.Background() + err := cw.cwClient.GetLogEventsPagesWithContext(ctx, + &cloudwatchlogs.GetLogEventsInput{ + Limit: aws.Int64(cfg.GetLogEventsPagesLimit), + LogGroupName: aws.String(cfg.GroupName), + LogStreamName: aws.String(cfg.StreamName), + NextToken: startFrom, + StartFromHead: aws.Bool(true), + }, + func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { + cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage) + startFrom = page.NextForwardToken + if page.NextForwardToken != nil { + streamIndexMutex.Lock() + cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken + streamIndexMutex.Unlock() + } + if lastPage { /*wait another ticker to check on new log availability*/ + cfg.logger.Tracef("last page") + hasMorePages = false + } + if len(page.Events) > 0 { + lastReadMessage = time.Now().UTC() + } + for _, event := range page.Events { + evt, err := cwLogToEvent(event, cfg) + if err != nil { + cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) + } else { + cfg.logger.Debugf("pushing message : %s", evt.Line.Raw) + linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() + outChan <- evt + } + } + return true + }, + ) + if err != nil { + newerr := errors.Wrapf(err, "while reading %s/%s", cfg.GroupName, cfg.StreamName) + cfg.logger.Warningf("err : %s", newerr) + return newerr + } + cfg.logger.Tracef("done reading GetLogEventsPagesWithContext") + if time.Since(lastReadMessage) > cfg.StreamReadTimeout { + cfg.logger.Infof("%s/%s reached timeout (%s) (last message was %s)", cfg.GroupName, cfg.StreamName, time.Since(lastReadMessage), + lastReadMessage) + return nil + } + } + case <-cfg.t.Dying(): + cfg.logger.Infof("logstream tail stopping") + return fmt.Errorf("killed") + } + } +} + +func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + cw.logger = logger + + dsn = strings.TrimPrefix(dsn, cw.GetName()+"://") + args := strings.Split(dsn, "?") + if len(args) != 2 { + return fmt.Errorf("query is mandatory (at least start_date and end_date or backlog)") + } + frags := strings.Split(args[0], ":") + if len(frags) != 2 { + return fmt.Errorf("cloudwatch path must contain group and stream : /my/group/name:stream/name") + } + cw.Config.GroupName = frags[0] + cw.Config.StreamName = &frags[1] + cw.Config.Labels = labels + u, err := url.ParseQuery(args[1]) + if err != nil { + return errors.Wrapf(err, "while parsing %s", dsn) + } + + for k, v := range u { + switch k { + case "log_level": + if len(v) != 1 { + return fmt.Errorf("expected zero or one value for 'log_level'") + } + lvl, err := log.ParseLevel(v[0]) + if err != nil { + return errors.Wrapf(err, "unknown level %s", v[0]) + } + cw.logger.Logger.SetLevel(lvl) + + case "profile": + if len(v) != 1 { + return fmt.Errorf("expected zero or one value for 'profile'") + } + awsprof := v[0] + cw.Config.AwsProfile = &awsprof + cw.logger.Debugf("profile set to '%s'", *cw.Config.AwsProfile) + case "start_date": + if len(v) != 1 { + return fmt.Errorf("expected zero or one argument for 'start_date'") + } + //let's reuse our parser helper so that a ton of date formats are supported + strdate, startDate := parser.GenDateParse(v[0]) + 
cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate)
+            cw.Config.StartTime = &startDate
+        case "end_date":
+            if len(v) != 1 {
+                return fmt.Errorf("expected zero or one argument for 'end_date'")
+            }
+            //let's reuse our parser helper so that a ton of date formats are supported
+            strdate, endDate := parser.GenDateParse(v[0])
+            cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate)
+            cw.Config.EndTime = &endDate
+        case "backlog":
+            if len(v) != 1 {
+                return fmt.Errorf("expected zero or one argument for 'backlog'")
+            }
+            //parse the backlog as a duration and derive start/end dates from it
+            duration, err := time.ParseDuration(v[0])
+            if err != nil {
+                return errors.Wrapf(err, "unable to parse '%s' as duration", v[0])
+            }
+            cw.logger.Debugf("parsed '%s' as '%s'", v[0], duration)
+            start := time.Now().UTC().Add(-duration)
+            cw.Config.StartTime = &start
+            end := time.Now().UTC()
+            cw.Config.EndTime = &end
+        default:
+            return fmt.Errorf("unexpected argument %s", k)
+        }
+    }
+    cw.logger.Tracef("host=%s", cw.Config.GroupName)
+    cw.logger.Tracef("stream=%s", *cw.Config.StreamName)
+    cw.Config.GetLogEventsPagesLimit = &def_GetLogEventsPagesLimit
+
+    if err := cw.newClient(); err != nil {
+        return err
+    }
+
+    if cw.Config.StreamName == nil || cw.Config.GroupName == "" {
+        return fmt.Errorf("missing stream or group name")
+    }
+    if cw.Config.StartTime == nil || cw.Config.EndTime == nil {
+        return fmt.Errorf("start_date and end_date or backlog are mandatory in one-shot mode")
+    }
+
+    cw.Config.Mode = configuration.CAT_MODE
+    cw.streamIndexes = make(map[string]string)
+    cw.t = &tomb.Tomb{}
+    return nil
+}
+
+func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
+    //StreamName string, Start time.Time, End time.Time
+    config := LogStreamTailConfig{
+        GroupName:              cw.Config.GroupName,
+        StreamName:             *cw.Config.StreamName,
+        StartTime:              *cw.Config.StartTime,
+        EndTime:                *cw.Config.EndTime,
+        GetLogEventsPagesLimit: *cw.Config.GetLogEventsPagesLimit,
+        logger: cw.logger.WithFields(log.Fields{
+            "group":  cw.Config.GroupName,
+            "stream": *cw.Config.StreamName,
+        }),
+        Labels:     cw.Config.Labels,
+        ExpectMode: leaky.TIMEMACHINE,
+    }
+    return cw.CatLogStream(&config, out)
+}
+
+func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error {
+    var startFrom *string
+    var head = true
+    /*convert the times*/
+    startTime := cfg.StartTime.UTC().Unix() * 1000
+    endTime := cfg.EndTime.UTC().Unix() * 1000
+    hasMoreEvents := true
+    for hasMoreEvents {
+        select {
+        default:
+            cfg.logger.Tracef("Calling GetLogEventsPagesWithContext(%s, %s), startTime:%d / endTime:%d",
+                cfg.GroupName, cfg.StreamName, startTime, endTime)
+            cfg.logger.Tracef("startTime:%s / endTime:%s", cfg.StartTime, cfg.EndTime)
+            if startFrom != nil {
+                cfg.logger.Tracef("next_token: %s", *startFrom)
+            }
+            ctx := context.Background()
+            err := cw.cwClient.GetLogEventsPagesWithContext(ctx,
+                &cloudwatchlogs.GetLogEventsInput{
+                    Limit:         aws.Int64(10),
+                    LogGroupName:  aws.String(cfg.GroupName),
+                    LogStreamName: aws.String(cfg.StreamName),
+                    StartTime:     aws.Int64(startTime),
+                    EndTime:       aws.Int64(endTime),
+                    StartFromHead: &head,
+                    NextToken:     startFrom,
+                },
+                func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {
+                    cfg.logger.Tracef("in GetLogEventsPagesWithContext handler (%d events) (last:%t)", len(page.Events), lastPage)
+                    for _, event := range page.Events {
+                        evt, err := cwLogToEvent(event, cfg)
+                        if err != nil {
+                            cfg.logger.Warningf("discard event : %s", err)
+                            //don't push the empty event downstream
+                            continue
+                        }
+
cfg.logger.Debugf("pushing message : %s", evt.Line.Raw) + outChan <- evt + } + if startFrom != nil && *page.NextForwardToken == *startFrom { + cfg.logger.Debugf("reached end of available events") + hasMoreEvents = false + return false + } + startFrom = page.NextForwardToken + return true + }, + ) + if err != nil { + return errors.Wrapf(err, "while reading logs from %s/%s", cfg.GroupName, cfg.StreamName) + } + cfg.logger.Tracef("after GetLogEventsPagesWithContext") + case <-cw.t.Dying(): + cfg.logger.Warningf("cat stream killed") + return nil + } + } + cfg.logger.Tracef("CatLogStream out") + + return nil +} + +func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) (types.Event, error) { + l := types.Line{} + evt := types.Event{} + if log.Message == nil { + return evt, fmt.Errorf("nil message") + } + msg := *log.Message + if cfg.PrependCloudwatchTimestamp != nil && *cfg.PrependCloudwatchTimestamp { + eventTimestamp := time.Unix(0, *log.Timestamp*int64(time.Millisecond)) + msg = eventTimestamp.String() + " " + msg + } + l.Raw = msg + l.Labels = cfg.Labels + l.Time = time.Now().UTC() + l.Src = fmt.Sprintf("%s/%s", cfg.GroupName, cfg.StreamName) + l.Process = true + l.Module = "cloudwatch" + evt.Line = l + evt.Process = true + evt.Type = types.LOG + evt.ExpectMode = cfg.ExpectMode + cfg.logger.Debugf("returned event labels : %+v", evt.Line.Labels) + return evt, nil +} diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go new file mode 100644 index 0000000..d922909 --- /dev/null +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -0,0 +1,808 @@ +package cloudwatchacquisition + +import ( + "fmt" + "net" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" +) + +/* + test plan : + - start on bad group/bad stream + - start on good settings (oneshot) -> check expected messages + - start on good settings (stream) -> check expected messages within given time + - check shutdown/restart +*/ + +func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { + input := &cloudwatchlogs.DescribeLogGroupsInput{} + result, err := cw.cwClient.DescribeLogGroups(input) + require.NoError(t, err) + for _, group := range result.LogGroups { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: group.LogGroupName, + }) + require.NoError(t, err) + } +} + +func checkForLocalStackAvailability() error { + v := os.Getenv("AWS_ENDPOINT_FORCE") + if v == "" { + return fmt.Errorf("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") + } + + v = strings.TrimPrefix(v, "http://") + + _, err := net.Dial("tcp", v) + if err != nil { + return fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + } + + return nil +} + +func TestMain(m *testing.M) { + if runtime.GOOS == "windows" { + os.Exit(0) + } + if err := checkForLocalStackAvailability(); err != nil { + log.Fatalf("local stack error : %s", err) + } + def_PollNewStreamInterval = 1 * time.Second + def_PollStreamInterval = 1 * time.Second + def_StreamReadTimeout = 10 * time.Second + def_MaxStreamAge = 5 * time.Second + def_PollDeadStreamInterval = 5 * time.Second + os.Exit(m.Run()) +} + +func TestWatchLogGroupForStreams(t *testing.T) { + if 
runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + log.SetLevel(log.DebugLevel) + tests := []struct { + config []byte + expectedCfgErr string + expectedStartErr string + name string + setup func(*testing.T, *CloudwatchSource) + run func(*testing.T, *CloudwatchSource) + teardown func(*testing.T, *CloudwatchSource) + expectedResLen int + expectedResMessages []string + }{ + // require a group name that doesn't exist + { + name: "group_does_not_exists", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: b +stream_name: test_stream`), + expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_group_not_used_1"), + }) + require.NoError(t, err) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_group_not_used_1"), + }) + require.NoError(t, err) + }, + }, + // test stream mismatch + { + name: "group_exists_bad_stream_name", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_group1 +stream_name: test_stream_bad`), + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + // have a message before we start - won't be popped, but will trigger stream monitoring + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 0, + }, + // test stream mismatch + { + name: "group_exists_bad_stream_regexp", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_group1 +stream_regexp: test_bad[0-9]+`), + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + // have a message before we start - won't be popped, but will trigger stream monitoring + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + 
}, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 0, + }, + // require a group name that does exist and contains a stream in which we gonna put events + { + name: "group_exists_stream_exists_has_events", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_log_group1 +log_level: trace +stream_name: test_stream`), + // expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + // have a message before we start - won't be popped, but will trigger stream monitoring + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + run: func(t *testing.T, cw *CloudwatchSource) { + // wait for new stream pickup + stream poll interval + time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) + time.Sleep(def_PollStreamInterval + (1 * time.Second)) + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_4"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + // and add an event in the future that will be popped + { + Message: aws.String("test_message_5"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 3, + expectedResMessages: []string{"test_message_1", "test_message_4", "test_message_5"}, + }, + // have a stream generate events, reach time-out and gets polled again + { + name: "group_exists_stream_exists_has_events+timeout", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_log_group1 +log_level: trace +stream_name: test_stream`), + // expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + 
require.NoError(t, err) + + // have a message before we start - won't be popped, but will trigger stream monitoring + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + run: func(t *testing.T, cw *CloudwatchSource) { + // wait for new stream pickup + stream poll interval + time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) + time.Sleep(def_PollStreamInterval + (1 * time.Second)) + // send some events + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_41"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + // wait for the stream to time-out + time.Sleep(def_StreamReadTimeout + (1 * time.Second)) + // and send events again + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_51"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + // wait for new stream pickup + stream poll interval + time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) + time.Sleep(def_PollStreamInterval + (1 * time.Second)) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 3, + expectedResMessages: []string{"test_message_1", "test_message_41", "test_message_51"}, + }, + // have a stream generate events, reach time-out and dead body collection + { + name: "group_exists_stream_exists_has_events+timeout+GC", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_log_group1 +log_level: trace +stream_name: test_stream`), + // expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + // have a message before we start - won't be popped, but will trigger stream monitoring + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + run: func(t *testing.T, cw *CloudwatchSource) { + // 
wait for new stream pickup + stream poll interval + time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) + time.Sleep(def_PollStreamInterval + (1 * time.Second)) + time.Sleep(def_PollDeadStreamInterval + (1 * time.Second)) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 1, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + dbgLogger := log.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(log.DebugLevel) + dbgLogger.Infof("starting test") + cw := CloudwatchSource{} + err := cw.Configure(tc.config, dbgLogger) + cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + + if tc.expectedCfgErr != "" { + return + } + + // run pre-routine : tests use it to set group & streams etc. + if tc.setup != nil { + tc.setup(t, &cw) + } + out := make(chan types.Event) + tmb := tomb.Tomb{} + var rcvdEvts []types.Event + + dbgLogger.Infof("running StreamingAcquisition") + actmb := tomb.Tomb{} + actmb.Go(func() error { + err := cw.StreamingAcquisition(out, &actmb) + dbgLogger.Infof("acquis done") + cstest.RequireErrorContains(t, err, tc.expectedStartErr) + return nil + }) + + // let's empty output chan + tmb.Go(func() error { + for { + select { + case in := <-out: + log.Debugf("received event %+v", in) + rcvdEvts = append(rcvdEvts, in) + case <-tmb.Dying(): + log.Debugf("pumper died") + return nil + } + } + }) + + if tc.run != nil { + tc.run(t, &cw) + } else { + dbgLogger.Warning("no code to run") + } + + time.Sleep(5 * time.Second) + dbgLogger.Infof("killing collector") + tmb.Kill(nil) + <-tmb.Dead() + dbgLogger.Infof("killing datasource") + actmb.Kill(nil) + <-actmb.Dead() + // dbgLogger.Infof("collected events : %d -> %+v", len(rcvd_evts), rcvd_evts) + // check results + if tc.expectedResLen != -1 { + if tc.expectedResLen != len(rcvdEvts) { + t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) + } + dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) + } + if len(tc.expectedResMessages) != 0 { + res := tc.expectedResMessages + for idx, v := range rcvdEvts { + if len(res) == 0 { + t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) + } + if res[0] != v.Line.Raw { + t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) + } + dbgLogger.Debugf("got message '%s'", res[0]) + res = res[1:] + } + if len(res) != 0 { + t.Fatalf("leftover unmatched results : %v", res) + } + + } + if tc.teardown != nil { + tc.teardown(t, &cw) + } + }) + } +} + +func TestConfiguration(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + log.SetLevel(log.DebugLevel) + tests := []struct { + config []byte + expectedCfgErr string + expectedStartErr string + name string + }{ + { + name: "group_does_not_exists", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +group_name: test_group +stream_name: test_stream`), + expectedStartErr: "The specified 
log group does not exist", + }, + { + config: []byte(` +xxx: cloudwatch +labels: + type: test_source +group_name: test_group +stream_name: test_stream`), + expectedCfgErr: "field xxx not found in type", + }, + { + name: "missing_group_name", + config: []byte(` +source: cloudwatch +aws_region: us-east-1 +labels: + type: test_source +stream_name: test_stream`), + expectedCfgErr: "group_name is mandatory for CloudwatchSource", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + dbgLogger := log.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(log.DebugLevel) + cw := CloudwatchSource{} + err := cw.Configure(tc.config, dbgLogger) + cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + if tc.expectedCfgErr != "" { + return + } + + out := make(chan types.Event) + tmb := tomb.Tomb{} + + switch cw.GetMode() { + case "tail": + err = cw.StreamingAcquisition(out, &tmb) + case "cat": + err = cw.OneShotAcquisition(out, &tmb) + } + + cstest.RequireErrorContains(t, err, tc.expectedStartErr) + + log.Debugf("killing ...") + tmb.Kill(nil) + <-tmb.Dead() + log.Debugf("dead :)") + }) + } +} + +func TestConfigureByDSN(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + log.SetLevel(log.DebugLevel) + tests := []struct { + dsn string + labels map[string]string + expectedCfgErr string + name string + }{ + { + name: "missing_query", + dsn: "cloudwatch://bad_log_group:bad_stream_name", + expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)", + }, + { + name: "backlog", + dsn: "cloudwatch://bad_log_group:bad_stream_name?backlog=30m&log_level=info&profile=test", + // expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)", + }, + { + name: "start_date/end_date", + dsn: "cloudwatch://bad_log_group:bad_stream_name?start_date=2021/05/15 14:04&end_date=2021/05/15 15:04", + // expectedCfgErr: "query is mandatory (at least start_date and end_date or backlog)", + }, + { + name: "bad_log_level", + dsn: "cloudwatch://bad_log_group:bad_stream_name?backlog=4h&log_level=", + expectedCfgErr: "unknown level : not a valid logrus Level: ", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + dbgLogger := log.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(log.DebugLevel) + cw := CloudwatchSource{} + err := cw.ConfigureByDSN(tc.dsn, tc.labels, dbgLogger) + cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + }) + } +} + +func TestOneShotAcquisition(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + log.SetLevel(log.DebugLevel) + tests := []struct { + dsn string + expectedCfgErr string + expectedStartErr string + name string + setup func(*testing.T, *CloudwatchSource) + run func(*testing.T, *CloudwatchSource) + teardown func(*testing.T, *CloudwatchSource) + expectedResLen int + expectedResMessages []string + }{ + // stream with no data + { + name: "empty_stream", + dsn: "cloudwatch://test_log_group1:test_stream?backlog=1h", + // expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + 
require.NoError(t, err) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 0, + }, + // stream with one event + { + name: "get_one_event", + dsn: "cloudwatch://test_log_group1:test_stream?backlog=1h", + // expectedStartErr: "The specified log group does not exist", + setup: func(t *testing.T, cw *CloudwatchSource) { + deleteAllLogGroups(t, cw) + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + + _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + }) + require.NoError(t, err) + + // this one is too much in the back + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_1"), + Timestamp: aws.Int64(time.Now().UTC().Add(-(2 * time.Hour)).UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + + // this one can be read + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_2"), + Timestamp: aws.Int64(time.Now().UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + + // this one is in the past + _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + LogGroupName: aws.String("test_log_group1"), + LogStreamName: aws.String("test_stream"), + LogEvents: []*cloudwatchlogs.InputLogEvent{ + { + Message: aws.String("test_message_3"), + Timestamp: aws.Int64(time.Now().UTC().Add(-(3 * time.Hour)).UTC().Unix() * 1000), + }, + }, + }) + require.NoError(t, err) + }, + teardown: func(t *testing.T, cw *CloudwatchSource) { + _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("test_log_group1"), + }) + require.NoError(t, err) + }, + expectedResLen: 1, + expectedResMessages: []string{"test_message_2"}, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + dbgLogger := log.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(log.DebugLevel) + dbgLogger.Infof("starting test") + cw := CloudwatchSource{} + err := cw.ConfigureByDSN(tc.dsn, map[string]string{"type": "test"}, dbgLogger) + cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + if tc.expectedCfgErr != "" { + return + } + + dbgLogger.Infof("config done test") + // run pre-routine : tests use it to set group & streams etc. 
+ if tc.setup != nil { + tc.setup(t, &cw) + } + out := make(chan types.Event, 100) + tmb := tomb.Tomb{} + var rcvdEvts []types.Event + + dbgLogger.Infof("running OneShotAcquisition") + err = cw.OneShotAcquisition(out, &tmb) + dbgLogger.Infof("acquis done") + cstest.RequireErrorContains(t, err, tc.expectedStartErr) + close(out) + // let's empty output chan + for evt := range out { + rcvdEvts = append(rcvdEvts, evt) + } + + if tc.run != nil { + tc.run(t, &cw) + } else { + dbgLogger.Warning("no code to run") + } + if tc.expectedResLen != -1 { + if tc.expectedResLen != len(rcvdEvts) { + t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) + } else { + dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) + } + } + if len(tc.expectedResMessages) != 0 { + res := tc.expectedResMessages + for idx, v := range rcvdEvts { + if len(res) == 0 { + t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) + } + if res[0] != v.Line.Raw { + t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) + } + dbgLogger.Debugf("got message '%s'", res[0]) + res = res[1:] + } + if len(res) != 0 { + t.Fatalf("leftover unmatched results : %v", res) + } + + } + if tc.teardown != nil { + tc.teardown(t, &cw) + } + }) + } +} diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go new file mode 100644 index 0000000..117eadd --- /dev/null +++ b/pkg/acquisition/modules/docker/docker.go @@ -0,0 +1,562 @@ +package dockeracquisition + +import ( + "bufio" + "context" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/dlog" + dockerTypes "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_dockersource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +type DockerConfiguration struct { + CheckInterval string `yaml:"check_interval"` + FollowStdout bool `yaml:"follow_stdout"` + FollowStdErr bool `yaml:"follow_stderr"` + Until string `yaml:"until"` + Since string `yaml:"since"` + DockerHost string `yaml:"docker_host"` + ContainerName []string `yaml:"container_name"` + ContainerID []string `yaml:"container_id"` + ContainerNameRegexp []string `yaml:"container_name_regexp"` + ContainerIDRegexp []string `yaml:"container_id_regexp"` + ForceInotify bool `yaml:"force_inotify"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type DockerSource struct { + Config DockerConfiguration + runningContainerState map[string]*ContainerConfig + compiledContainerName []*regexp.Regexp + compiledContainerID []*regexp.Regexp + CheckIntervalDuration time.Duration + logger *log.Entry + Client client.CommonAPIClient + t *tomb.Tomb + containerLogsOptions *dockerTypes.ContainerLogsOptions +} + +type ContainerConfig struct { + Name string + ID string + t *tomb.Tomb + logger *log.Entry + Labels map[string]string + Tty bool +} + +func (d
*DockerSource) Configure(Config []byte, logger *log.Entry) error { + var err error + + d.Config = DockerConfiguration{ + FollowStdout: true, // default + FollowStdErr: true, // default + CheckInterval: "1s", // default + } + d.logger = logger + + d.runningContainerState = make(map[string]*ContainerConfig) + + err = yaml.UnmarshalStrict(Config, &d.Config) + if err != nil { + return errors.Wrap(err, "Cannot parse DockerAcquisition configuration") + } + + d.logger.Tracef("DockerAcquisition configuration: %+v", d.Config) + if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 { + return fmt.Errorf("no containers names or containers ID configuration provided") + } + + d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) + if err != nil { + return fmt.Errorf("parsing 'check_interval' parameter: %w", err) + } + + if d.Config.Mode == "" { + d.Config.Mode = configuration.TAIL_MODE + } + if d.Config.Mode != configuration.CAT_MODE && d.Config.Mode != configuration.TAIL_MODE { + return fmt.Errorf("unsupported mode %s for docker datasource", d.Config.Mode) + } + d.logger.Tracef("Actual DockerAcquisition configuration %+v", d.Config) + + for _, cont := range d.Config.ContainerNameRegexp { + d.compiledContainerName = append(d.compiledContainerName, regexp.MustCompile(cont)) + } + + for _, cont := range d.Config.ContainerIDRegexp { + d.compiledContainerID = append(d.compiledContainerID, regexp.MustCompile(cont)) + } + + dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + + if d.Config.Since == "" { + d.Config.Since = time.Now().UTC().Format(time.RFC3339) + } + + d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + ShowStdout: d.Config.FollowStdout, + ShowStderr: d.Config.FollowStdErr, + Follow: true, + Since: d.Config.Since, + } + + if d.Config.Until != "" { + d.containerLogsOptions.Until = d.Config.Until + } + + if d.Config.DockerHost != "" { + if err := client.WithHost(d.Config.DockerHost)(dockerClient); err != nil { + return err + } + } + d.Client = dockerClient + + _, err = d.Client.Info(context.Background()) + + if err != nil { + return errors.Wrapf(err, "failed to configure docker datasource %s", d.Config.DockerHost) + } + + return nil +} + +func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + var err error + + if !strings.HasPrefix(dsn, d.GetName()+"://") { + return fmt.Errorf("invalid DSN %s for docker source, must start with %s://", dsn, d.GetName()) + } + + d.Config = DockerConfiguration{ + FollowStdout: true, + FollowStdErr: true, + CheckInterval: "1s", + } + d.Config.ContainerName = make([]string, 0) + d.Config.ContainerID = make([]string, 0) + d.runningContainerState = make(map[string]*ContainerConfig) + d.Config.Mode = configuration.CAT_MODE + d.logger = logger + d.Config.Labels = labels + + dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + + d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + ShowStdout: d.Config.FollowStdout, + ShowStderr: d.Config.FollowStdErr, + Follow: false, + } + dsn = strings.TrimPrefix(dsn, d.GetName()+"://") + args := strings.Split(dsn, "?") + + if len(args) == 0 { + return fmt.Errorf("invalid dsn: %s", dsn) + } + + if len(args) == 1 && args[0] == "" { + return fmt.Errorf("empty %s DSN",
d.GetName()+"://") + } + d.Config.ContainerName = append(d.Config.ContainerName, args[0]) + // also add it as an ID, so the user can provide either a container name or a container ID + d.Config.ContainerID = append(d.Config.ContainerID, args[0]) + + // no parameters + if len(args) == 1 { + d.Client = dockerClient + return nil + } + + parameters, err := url.ParseQuery(args[1]) + if err != nil { + return errors.Wrapf(err, "while parsing parameters %s", dsn) + } + + for k, v := range parameters { + switch k { + case "log_level": + if len(v) != 1 { + return fmt.Errorf("expected only one 'log_level' parameter") + } + lvl, err := log.ParseLevel(v[0]) + if err != nil { + return errors.Wrapf(err, "unknown level %s", v[0]) + } + d.logger.Logger.SetLevel(lvl) + case "until": + if len(v) != 1 { + return fmt.Errorf("expected only one 'until' parameter") + } + d.containerLogsOptions.Until = v[0] + case "since": + if len(v) != 1 { + return fmt.Errorf("expected only one 'since' parameter") + } + d.containerLogsOptions.Since = v[0] + case "follow_stdout": + if len(v) != 1 { + return fmt.Errorf("expected only one 'follow_stdout' parameter") + } + followStdout, err := strconv.ParseBool(v[0]) + if err != nil { + return fmt.Errorf("parsing 'follow_stdout' parameter: %s", err) + } + d.Config.FollowStdout = followStdout + d.containerLogsOptions.ShowStdout = followStdout + case "follow_stderr": + if len(v) != 1 { + return fmt.Errorf("expected only one 'follow_stderr' parameter") + } + followStdErr, err := strconv.ParseBool(v[0]) + if err != nil { + return fmt.Errorf("parsing 'follow_stderr' parameter: %s", err) + } + d.Config.FollowStdErr = followStdErr + d.containerLogsOptions.ShowStderr = followStdErr + case "docker_host": + if len(v) != 1 { + return fmt.Errorf("expected only one 'docker_host' parameter") + } + if err := client.WithHost(v[0])(dockerClient); err != nil { + return err + } + } + } + d.Client = dockerClient + return nil +} + +func (d *DockerSource) GetMode() string { + return d.Config.Mode +} + +//SupportedModes returns the modes supported by the acquisition module +func (d *DockerSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} + +//OneShotAcquisition reads the logs of matching containers and returns when done +func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + d.logger.Debug("In oneshot") + runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + if err != nil { + return err + } + foundOne := false + for _, container := range runningContainer { + if _, ok := d.runningContainerState[container.ID]; ok { + d.logger.Debugf("container with id %s is already being read from", container.ID) + continue + } + if containerConfig, ok := d.EvalContainer(container); ok { + d.logger.Infof("reading logs from container %s", containerConfig.Name) + d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions) + if err != nil { + d.logger.Errorf("unable to read logs from container: %+v", err) + return err + } + // we use this library to normalize docker API logs (cf.
https://ahmet.im/blog/docker-logs-api-binary-format-explained/) + foundOne = true + var scanner *bufio.Scanner + if containerConfig.Tty { + scanner = bufio.NewScanner(dockerReader) + } else { + reader := dlog.NewReader(dockerReader) + scanner = bufio.NewScanner(reader) + } + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + l := types.Line{} + l.Raw = line + l.Labels = d.Config.Labels + l.Time = time.Now().UTC() + l.Src = containerConfig.Name + l.Process = true + l.Module = d.GetName() + linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() + evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + out <- evt + d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) + } + err = scanner.Err() + if err != nil { + d.logger.Errorf("Got error from docker read: %s", err) + } + d.runningContainerState[container.ID] = containerConfig + } + } + + t.Kill(nil) + + if !foundOne { + return fmt.Errorf("no container found named: %s, can't run one shot acquisition", d.Config.ContainerName[0]) + } + + return nil +} + +func (d *DockerSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (d *DockerSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (d *DockerSource) GetName() string { + return "docker" +} + +func (d *DockerSource) CanRun() error { + return nil +} + +func (d *DockerSource) getContainerTTY(containerId string) bool { + containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) + if err != nil { + return false + } + return containerDetails.Config.Tty +} + +func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*ContainerConfig, bool) { + for _, containerID := range d.Config.ContainerID { + if containerID == container.ID { + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + } + } + + for _, containerName := range d.Config.ContainerName { + for _, name := range container.Names { + if strings.HasPrefix(name, "/") && len(name) > 0 { + name = name[1:] + } + if name == containerName { + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + } + } + + } + + for _, cont := range d.compiledContainerID { + if matched := cont.Match([]byte(container.ID)); matched { + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + } + } + + for _, cont := range d.compiledContainerName { + for _, name := range container.Names { + if matched := cont.Match([]byte(name)); matched { + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + } + } + + } + + return &ContainerConfig{}, false +} + +func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { + ticker := time.NewTicker(d.CheckIntervalDuration) + d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) + for { + select { + case <-d.t.Dying(): + d.logger.Infof("stopping container watcher") + return nil + case <-ticker.C: + // to track for garbage collection + runningContainersID := make(map[string]bool) + runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + if err != nil { + if 
strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { + for idx, container := range d.runningContainerState { + if d.runningContainerState[idx].t.Alive() { + d.logger.Infof("killing tail for container %s", container.Name) + d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { + d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) + } + } + delete(d.runningContainerState, idx) + } + } else { + log.Errorf("container list err: %s", err) + } + continue + } + + for _, container := range runningContainer { + runningContainersID[container.ID] = true + + // don't need to re eval an already monitored container + if _, ok := d.runningContainerState[container.ID]; ok { + continue + } + if containerConfig, ok := d.EvalContainer(container); ok { + monitChan <- containerConfig + } + } + + for containerStateID, containerConfig := range d.runningContainerState { + if _, ok := runningContainersID[containerStateID]; !ok { + deleteChan <- containerConfig + } + } + d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) + + ticker.Reset(d.CheckIntervalDuration) + } + } +} + +func (d *DockerSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + d.t = t + monitChan := make(chan *ContainerConfig) + deleteChan := make(chan *ContainerConfig) + d.logger.Infof("Starting docker acquisition") + t.Go(func() error { + return d.DockerManager(monitChan, deleteChan, out) + }) + + return d.WatchContainer(monitChan, deleteChan) +} + +func (d *DockerSource) Dump() interface{} { + return d +} + +func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) error { + for scanner.Scan() { + out <- scanner.Text() + } + return scanner.Err() +} + +func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { + container.logger.Infof("start tail for container %s", container.Name) + dockerReader, err := d.Client.ContainerLogs(context.Background(), container.ID, *d.containerLogsOptions) + if err != nil { + container.logger.Errorf("unable to read logs from container: %+v", err) + return err + } + + var scanner *bufio.Scanner + // we use this library to normalize docker API logs (cf. 
https://ahmet.im/blog/docker-logs-api-binary-format-explained/) + if container.Tty { + scanner = bufio.NewScanner(dockerReader) + } else { + reader := dlog.NewReader(dockerReader) + scanner = bufio.NewScanner(reader) + } + readerChan := make(chan string) + readerTomb := &tomb.Tomb{} + readerTomb.Go(func() error { + return ReadTailScanner(scanner, readerChan, readerTomb) + }) + for { + select { + case <-container.t.Dying(): + readerTomb.Kill(nil) + container.logger.Infof("tail stopped for container %s", container.Name) + return nil + case line := <-readerChan: + if line == "" { + continue + } + l := types.Line{} + l.Raw = line + l.Labels = d.Config.Labels + l.Time = time.Now().UTC() + l.Src = container.Name + l.Process = true + l.Module = d.GetName() + var evt types.Event + if !d.Config.UseTimeMachine { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} + } else { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + linesRead.With(prometheus.Labels{"source": container.Name}).Inc() + outChan <- evt + d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) + case <-readerTomb.Dying(): + //This case is to handle temporarily losing the connection to the docker socket + //The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) + d.logger.Debugf("readerTomb dying for container %s, removing it from runningContainerState", container.Name) + deleteChan <- container + //Also reset the Since to avoid re-reading logs + d.Config.Since = time.Now().UTC().Format(time.RFC3339) + d.containerLogsOptions.Since = d.Config.Since + return nil + } + } +} + +func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { + d.logger.Info("DockerSource Manager started") + for { + select { + case newContainer := <-in: + if _, ok := d.runningContainerState[newContainer.ID]; !ok { + newContainer.t = &tomb.Tomb{} + newContainer.logger = d.logger.WithFields(log.Fields{"container_name": newContainer.Name}) + newContainer.t.Go(func() error { + return d.TailDocker(newContainer, outChan, deleteChan) + }) + d.runningContainerState[newContainer.ID] = newContainer + } + case containerToDelete := <-deleteChan: + if containerConfig, ok := d.runningContainerState[containerToDelete.ID]; ok { + log.Infof("container acquisition stopped for container '%s'", containerConfig.Name) + containerConfig.t.Kill(nil) + delete(d.runningContainerState, containerToDelete.ID) + } + case <-d.t.Dying(): + for idx, container := range d.runningContainerState { + if d.runningContainerState[idx].t.Alive() { + d.logger.Infof("killing tail for container %s", container.Name) + d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { + d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) + } + } + } + d.runningContainerState = nil + d.logger.Debugf("routine cleanup done, return") + return nil + } + } +} diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go new file mode 100644 index 0000000..d019da3 --- /dev/null +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -0,0 +1,321 @@ +package dockeracquisition + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + dockerTypes 
"github.com/docker/docker/api/types" + dockerContainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/stretchr/testify/assert" +) + +const testContainerName = "docker_test" + +var readLogs = false + +func TestConfigure(t *testing.T) { + log.Infof("Test 'TestConfigure'") + + tests := []struct { + config string + expectedErr string + }{ + { + config: `foobar: asd`, + expectedErr: "line 1: field foobar not found in type dockeracquisition.DockerConfiguration", + }, + { + config: ` +mode: tail +source: docker`, + expectedErr: "no containers names or containers ID configuration provided", + }, + { + config: ` +mode: cat +source: docker +container_name: + - toto`, + expectedErr: "", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "docker", + }) + for _, test := range tests { + f := DockerSource{} + err := f.Configure([]byte(test.config), subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestConfigureDSN(t *testing.T) { + log.Infof("Test 'TestConfigureDSN'") + var dockerHost string + if runtime.GOOS == "windows" { + dockerHost = "npipe:////./pipe/docker_engine" + } else { + dockerHost = "unix:///var/run/podman/podman.sock" + } + tests := []struct { + name string + dsn string + expectedErr string + }{ + { + name: "invalid DSN", + dsn: "asd://", + expectedErr: "invalid DSN asd:// for docker source, must start with docker://", + }, + { + name: "empty DSN", + dsn: "docker://", + expectedErr: "empty docker:// DSN", + }, + { + name: "DSN ok with log_level", + dsn: "docker://test_docker?log_level=warn", + expectedErr: "", + }, + { + name: "DSN invalid log_level", + dsn: "docker://test_docker?log_level=foobar", + expectedErr: "unknown level foobar: not a valid logrus Level:", + }, + { + name: "DSN ok with multiple parameters", + dsn: fmt.Sprintf("docker://test_docker?since=42min&docker_host=%s", dockerHost), + expectedErr: "", + }, + } + subLogger := log.WithFields(log.Fields{ + "type": "docker", + }) + for _, test := range tests { + f := DockerSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +type mockDockerCli struct { + client.Client +} + +func TestStreamingAcquisition(t *testing.T) { + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + tests := []struct { + config string + expectedErr string + expectedOutput string + expectedLines int + logType string + logLevel log.Level + }{ + { + config: ` +source: docker +mode: cat +container_name: + - docker_test`, + expectedErr: "", + expectedOutput: "", + expectedLines: 3, + logType: "test", + logLevel: log.InfoLevel, + }, + { + config: ` +source: docker +mode: cat +container_name_regexp: + - docker_*`, + expectedErr: "", + expectedOutput: "", + expectedLines: 3, + logType: "test", + logLevel: log.InfoLevel, + }, + } + + for _, ts := range tests { + var logger *log.Logger + var subLogger *log.Entry + if ts.expectedOutput != "" { + logger.SetLevel(ts.logLevel) + subLogger = logger.WithFields(log.Fields{ + "type": "docker", + }) + } else { + subLogger = log.WithFields(log.Fields{ + "type": "docker", + }) + } + + readLogs = false + dockerTomb := tomb.Tomb{} + out := make(chan types.Event) + dockerSource := DockerSource{} + err := dockerSource.Configure([]byte(ts.config), subLogger) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + 
dockerSource.Client = new(mockDockerCli) + actualLines := 0 + readerTomb := &tomb.Tomb{} + streamTomb := tomb.Tomb{} + streamTomb.Go(func() error { + return dockerSource.StreamingAcquisition(out, &dockerTomb) + }) + readerTomb.Go(func() error { + time.Sleep(1 * time.Second) + ticker := time.NewTicker(1 * time.Second) + for { + select { + case <-out: + actualLines++ + ticker.Reset(1 * time.Second) + case <-ticker.C: + log.Infof("no more line to read") + dockerSource.t.Kill(nil) + return nil + } + } + }) + cstest.AssertErrorContains(t, err, ts.expectedErr) + + if err := readerTomb.Wait(); err != nil { + t.Fatal(err) + } + if ts.expectedLines != 0 { + assert.Equal(t, ts.expectedLines, actualLines) + } + err = streamTomb.Wait() + if err != nil { + t.Fatalf("docker acquisition error: %s", err) + } + } + +} + +func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { + if readLogs == true { + return []dockerTypes.Container{}, nil + } + containers := make([]dockerTypes.Container, 0) + container := &dockerTypes.Container{ + ID: "12456", + Names: []string{testContainerName}, + } + containers = append(containers, *container) + + return containers, nil +} + +func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { + if readLogs == true { + return io.NopCloser(strings.NewReader("")), nil + } + readLogs = true + data := []string{"docker\n", "test\n", "1234\n"} + ret := "" + for _, line := range data { + startLineByte := make([]byte, 8) + binary.LittleEndian.PutUint32(startLineByte, 1) //stdout stream + binary.BigEndian.PutUint32(startLineByte[4:], uint32(len(line))) + ret += fmt.Sprintf("%s%s", startLineByte, line) + } + r := io.NopCloser(strings.NewReader(ret)) // r type is io.ReadCloser + return r, nil +} + +func (cli *mockDockerCli) ContainerInspect(ctx context.Context, c string) (dockerTypes.ContainerJSON, error) { + r := dockerTypes.ContainerJSON{ + Config: &dockerContainer.Config{ + Tty: false, + }, + } + return r, nil +} + +func TestOneShot(t *testing.T) { + log.Infof("Test 'TestOneShot'") + + tests := []struct { + dsn string + expectedErr string + expectedOutput string + expectedLines int + logType string + logLevel log.Level + }{ + { + dsn: "docker://non_exist_docker", + expectedErr: "no container found named: non_exist_docker, can't run one shot acquisition", + expectedOutput: "", + expectedLines: 0, + logType: "test", + logLevel: log.InfoLevel, + }, + { + dsn: "docker://" + testContainerName, + expectedErr: "", + expectedOutput: "", + expectedLines: 3, + logType: "test", + logLevel: log.InfoLevel, + }, + } + + for _, ts := range tests { + var subLogger *log.Entry + var logger *log.Logger + if ts.expectedOutput != "" { + logger.SetLevel(ts.logLevel) + subLogger = logger.WithFields(log.Fields{ + "type": "docker", + }) + } else { + log.SetLevel(ts.logLevel) + subLogger = log.WithFields(log.Fields{ + "type": "docker", + }) + } + + readLogs = false + dockerClient := &DockerSource{} + labels := make(map[string]string) + labels["type"] = ts.logType + + if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger); err != nil { + t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) + } + dockerClient.Client = new(mockDockerCli) + out := make(chan types.Event, 100) + tomb := tomb.Tomb{} + err := dockerClient.OneShotAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + + // else we do the check before 
actualLines is incremented ... + if ts.expectedLines != 0 { + assert.Equal(t, ts.expectedLines, len(out)) + } + } + +} diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go new file mode 100644 index 0000000..b1b29c9 --- /dev/null +++ b/pkg/acquisition/modules/file/file.go @@ -0,0 +1,493 @@ +package fileacquisition + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/fsnotify/fsnotify" + "github.com/nxadm/tail" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_filesource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +type FileConfiguration struct { + Filenames []string + ExcludeRegexps []string `yaml:"exclude_regexps"` + Filename string + ForceInotify bool `yaml:"force_inotify"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type FileSource struct { + config FileConfiguration + watcher *fsnotify.Watcher + watchedDirectories map[string]bool + tails map[string]bool + logger *log.Entry + files []string + exclude_regexps []*regexp.Regexp +} + +func (f *FileSource) Configure(Config []byte, logger *log.Entry) error { + fileConfig := FileConfiguration{} + f.logger = logger + f.watchedDirectories = make(map[string]bool) + f.tails = make(map[string]bool) + err := yaml.UnmarshalStrict(Config, &fileConfig) + if err != nil { + return errors.Wrap(err, "Cannot parse FileAcquisition configuration") + } + f.logger.Tracef("FileAcquisition configuration: %+v", fileConfig) + if len(fileConfig.Filename) != 0 { + fileConfig.Filenames = append(fileConfig.Filenames, fileConfig.Filename) + } + if len(fileConfig.Filenames) == 0 { + return fmt.Errorf("no filename or filenames configuration provided") + } + f.config = fileConfig + if f.config.Mode == "" { + f.config.Mode = configuration.TAIL_MODE + } + if f.config.Mode != configuration.CAT_MODE && f.config.Mode != configuration.TAIL_MODE { + return fmt.Errorf("unsupported mode %s for file source", f.config.Mode) + } + f.watcher, err = fsnotify.NewWatcher() + if err != nil { + return errors.Wrapf(err, "Could not create fsnotify watcher") + } + for _, exclude := range f.config.ExcludeRegexps { + re, err := regexp.Compile(exclude) + if err != nil { + return errors.Wrapf(err, "Could not compile regexp %s", exclude) + } + f.exclude_regexps = append(f.exclude_regexps, re) + } + f.logger.Tracef("Actual FileAcquisition Configuration %+v", f.config) + for _, pattern := range f.config.Filenames { + if f.config.ForceInotify { + directory := filepath.Dir(pattern) + f.logger.Infof("Force add watch on %s", directory) + if !f.watchedDirectories[directory] { + err = f.watcher.Add(directory) + if err != nil { + f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) + continue + } + f.watchedDirectories[directory] = true + } + } + files, err := filepath.Glob(pattern) + if err != nil { + return errors.Wrap(err, "Glob failure") + } + if len(files) == 0 { + f.logger.Warnf("No matching files for pattern %s", pattern) + continue + } + for _, file := range files { + + //check if file is excluded + excluded := false + for _, pattern 
:= range f.exclude_regexps { + if pattern.MatchString(file) { + excluded = true + f.logger.Infof("Skipping file %s as it matches exclude pattern %s", file, pattern) + break + } + } + if excluded { + continue + } + if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { //we have a glob pattern + directory := filepath.Dir(file) + f.logger.Debugf("Will add watch to directory: %s", directory) + if !f.watchedDirectories[directory] { + + err = f.watcher.Add(directory) + if err != nil { + f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) + continue + } + f.watchedDirectories[directory] = true + } else { + f.logger.Debugf("Watch for directory %s already exists", directory) + } + } + f.logger.Infof("Adding file %s to datasources", file) + f.files = append(f.files, file) + } + } + return nil +} + +func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + if !strings.HasPrefix(dsn, "file://") { + return fmt.Errorf("invalid DSN %s for file source, must start with file://", dsn) + } + + f.logger = logger + + dsn = strings.TrimPrefix(dsn, "file://") + + args := strings.Split(dsn, "?") + + if len(args[0]) == 0 { + return fmt.Errorf("empty file:// DSN") + } + + if len(args) == 2 && len(args[1]) != 0 { + params, err := url.ParseQuery(args[1]) + if err != nil { + return errors.Wrap(err, "could not parse file args") + } + for key, value := range params { + if key != "log_level" { + return fmt.Errorf("unsupported key %s in file DSN", key) + } + if len(value) != 1 { + return errors.New("expected zero or one value for 'log_level'") + } + lvl, err := log.ParseLevel(value[0]) + if err != nil { + return errors.Wrapf(err, "unknown level %s", value[0]) + } + f.logger.Logger.SetLevel(lvl) + } + } + + f.config = FileConfiguration{} + f.config.Labels = labels + f.config.Mode = configuration.CAT_MODE + + f.logger.Debugf("Will try pattern %s", args[0]) + files, err := filepath.Glob(args[0]) + if err != nil { + return errors.Wrap(err, "Glob failure") + } + + if len(files) == 0 { + return fmt.Errorf("no matching files for pattern %s", args[0]) + } + + if len(files) > 1 { + f.logger.Infof("Will read %d files", len(files)) + } + + for _, file := range files { + f.logger.Infof("Adding file %s to filelist", file) + f.files = append(f.files, file) + } + return nil +} + +func (f *FileSource) GetMode() string { + return f.config.Mode +} + +//SupportedModes returns the supported modes by the acquisition module +func (f *FileSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} + +//OneShotAcquisition reads a set of file and returns when done +func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + f.logger.Debug("In oneshot") + for _, file := range f.files { + fi, err := os.Stat(file) + if err != nil { + return fmt.Errorf("could not stat file %s : %w", file, err) + } + if fi.IsDir() { + f.logger.Warnf("%s is a directory, ignoring it.", file) + continue + } + f.logger.Infof("reading %s at once", file) + err = f.readFile(file, out, t) + if err != nil { + return err + } + + } + return nil +} + +func (f *FileSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (f *FileSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (f *FileSource) GetName() string { + return "file" +} + +func (f *FileSource) CanRun() error { + return nil +} + +func (f *FileSource) 
StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + f.logger.Debug("Starting live acquisition") + t.Go(func() error { + return f.monitorNewFiles(out, t) + }) + for _, file := range f.files { + //before opening the file, check if we need to specifically avoid it. (XXX) + skip := false + for _, pattern := range f.exclude_regexps { + if pattern.MatchString(file) { + f.logger.Infof("file %s matches exclusion pattern %s, skipping", file, pattern.String()) + skip = true + break + } + } + if skip { + continue + } + + //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + //do not rely on stat, reclose file immediately as it's opened by Tail + fd, err := os.Open(file) + if err != nil { + f.logger.Errorf("unable to read %s : %s", file, err) + continue + } + if err := fd.Close(); err != nil { + f.logger.Errorf("unable to close %s : %s", file, err) + continue + } + + fi, err := os.Stat(file) + if err != nil { + return fmt.Errorf("could not stat file %s : %w", file, err) + } + if fi.IsDir() { + f.logger.Warnf("%s is a directory, ignoring it.", file) + continue + } + + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}}) + if err != nil { + f.logger.Errorf("Could not start tailing file %s : %s", file, err) + continue + } + f.tails[file] = true + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/file/live/fsnotify") + return f.tailFile(out, t, tail) + }) + } + return nil +} + +func (f *FileSource) Dump() interface{} { + return f +} + +func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { + logger := f.logger.WithField("goroutine", "inotify") + for { + select { + case event, ok := <-f.watcher.Events: + if !ok { + return nil + } + + if event.Op&fsnotify.Create == fsnotify.Create { + fi, err := os.Stat(event.Name) + if err != nil { + logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) + continue + } + if fi.IsDir() { + continue + } + logger.Debugf("Detected new file %s", event.Name) + matched := false + for _, pattern := range f.config.Filenames { + logger.Debugf("Matching %s with %s", pattern, event.Name) + matched, err = path.Match(pattern, event.Name) + if err != nil { + logger.Errorf("Could not match pattern : %s", err) + continue + } + if matched { + break + } + } + if !matched { + continue + } + + //before opening the file, check if we need to specifically avoid it. (XXX) + skip := false + for _, pattern := range f.exclude_regexps { + if pattern.MatchString(event.Name) { + f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + skip = true + break + } + } + if skip { + continue + } + + if f.tails[event.Name] { + //we already have a tail on it, do not start a new one + logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + break + } + //cf. 
https://github.com/crowdsecurity/crowdsec/issues/1168 + //do not rely on stat, reclose file immediately as it's opened by Tail + fd, err := os.Open(event.Name) + if err != nil { + f.logger.Errorf("unable to read %s : %s", event.Name, err) + continue + } + if err := fd.Close(); err != nil { + f.logger.Errorf("unable to close %s : %s", event.Name, err) + continue + } + //Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: true, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + if err != nil { + logger.Errorf("Could not start tailing file %s : %s", event.Name, err) + break + } + f.tails[event.Name] = true + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/tailfile") + return f.tailFile(out, t, tail) + }) + } + case err, ok := <-f.watcher.Errors: + if !ok { + return nil + } + logger.Errorf("Error while monitoring folder: %s", err) + case <-t.Dying(): + err := f.watcher.Close() + if err != nil { + return errors.Wrapf(err, "could not remove all inotify watches") + } + return nil + } + } +} + +func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error { + logger := f.logger.WithField("tail", tail.Filename) + logger.Debugf("-> Starting tail of %s", tail.Filename) + for { + select { + case <-t.Dying(): + logger.Infof("File datasource %s stopping", tail.Filename) + if err := tail.Stop(); err != nil { + f.logger.Errorf("error in stop : %s", err) + return err + } + return nil + case <-tail.Tomb.Dying(): //our tailer is dying + logger.Warningf("File reader of %s died", tail.Filename) + t.Kill(fmt.Errorf("dead reader for %s", tail.Filename)) + return fmt.Errorf("reader for %s is dead", tail.Filename) + case line := <-tail.Lines: + if line == nil { + logger.Warningf("tail for %s is empty", tail.Filename) + continue + } + if line.Err != nil { + logger.Warningf("fetch error : %v", line.Err) + return line.Err + } + if line.Text == "" { //skip empty lines + continue + } + linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() + l := types.Line{ + Raw: trimLine(line.Text), + Labels: f.config.Labels, + Time: line.Time, + Src: tail.Filename, + Process: true, + Module: f.GetName(), + } + //we're tailing, it must be real time logs + logger.Debugf("pushing %+v", l) + + expectMode := leaky.LIVE + if f.config.UseTimeMachine { + expectMode = leaky.TIMEMACHINE + } + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: expectMode} + } + } +} + +func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tomb) error { + var scanner *bufio.Scanner + logger := f.logger.WithField("oneshot", filename) + fd, err := os.Open(filename) + + if err != nil { + return errors.Wrapf(err, "failed opening %s", filename) + } + defer fd.Close() + + if strings.HasSuffix(filename, ".gz") { + gz, err := gzip.NewReader(fd) + if err != nil { + logger.Errorf("Failed to read gz file: %s", err) + return errors.Wrapf(err, "failed to read gz %s", filename) + } + defer gz.Close() + scanner = bufio.NewScanner(gz) + + } else { + scanner = bufio.NewScanner(fd) + } + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + if scanner.Text() == "" { + continue + } + l := types.Line{ + Raw: scanner.Text(), + Time: time.Now().UTC(), + Src: filename, + Labels: f.config.Labels, + Process: true, + Module: f.GetName(), + } + logger.Debugf("line %s", l.Raw) + linesRead.With(prometheus.Labels{"source": filename}).Inc() + + 
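+ // each line read also feeds the cs_filesource_hits_total counter + // exposed via GetMetrics/GetAggregMetrics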
//we're reading logs at once, it must be time-machine buckets + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + t.Kill(nil) + return nil +} diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go new file mode 100644 index 0000000..06653fb --- /dev/null +++ b/pkg/acquisition/modules/file/file_test.go @@ -0,0 +1,474 @@ +package fileacquisition_test + +import ( + "fmt" + "os" + "runtime" + "testing" + "time" + + fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" +) + +func TestBadConfiguration(t *testing.T) { + tests := []struct { + name string + config string + expectedErr string + }{ + { + name: "extra configuration key", + config: "foobar: asd.log", + expectedErr: "line 1: field foobar not found in type fileacquisition.FileConfiguration", + }, + { + name: "missing filenames", + config: "mode: tail", + expectedErr: "no filename or filenames configuration provided", + }, + { + name: "glob syntax error", + config: `filename: "[asd-.log"`, + expectedErr: "Glob failure: syntax error in pattern", + }, + { + name: "bad exclude regexp", + config: `filenames: ["asd.log"] +exclude_regexps: ["as[a-$d"]`, + expectedErr: "Could not compile regexp as", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "file", + }) + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + f := fileacquisition.FileSource{} + err := f.Configure([]byte(tc.config), subLogger) + cstest.RequireErrorContains(t, err, tc.expectedErr) + }) + } +} + +func TestConfigureDSN(t *testing.T) { + file := "/etc/passwd" + + if runtime.GOOS == "windows" { + file = `C:\Windows\System32\drivers\etc\hosts` + } + + tests := []struct { + dsn string + expectedErr string + }{ + { + dsn: "asd://", + expectedErr: "invalid DSN asd:// for file source, must start with file://", + }, + { + dsn: "file://", + expectedErr: "empty file:// DSN", + }, + { + dsn: fmt.Sprintf("file://%s?log_level=warn", file), + }, + { + dsn: fmt.Sprintf("file://%s?log_level=foobar", file), + expectedErr: "unknown level foobar: not a valid logrus Level:", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "file", + }) + + for _, tc := range tests { + tc := tc + t.Run(tc.dsn, func(t *testing.T) { + f := fileacquisition.FileSource{} + err := f.ConfigureByDSN(tc.dsn, map[string]string{"type": "testtype"}, subLogger) + cstest.RequireErrorContains(t, err, tc.expectedErr) + }) + } +} + +func TestOneShot(t *testing.T) { + permDeniedFile := "/etc/shadow" + permDeniedError := "failed opening /etc/shadow: open /etc/shadow: permission denied" + + if runtime.GOOS == "windows" { + // Technically, this is not a permission denied error, but we just want to test what happens + // if we do not have access to the file + permDeniedFile = `C:\Windows\System32\config\SAM` + permDeniedError = `failed opening C:\Windows\System32\config\SAM: open C:\Windows\System32\config\SAM: The process cannot access the file because it is being used by another process.` + } + + tests := []struct { + name string + config string + expectedConfigErr string + expectedErr string + expectedOutput string + expectedLines int + logLevel log.Level + setup func() + afterConfigure func() + 
teardown func() + }{ + { + name: "permission denied", + config: fmt.Sprintf(` +mode: cat +filename: %s`, permDeniedFile), + expectedErr: permDeniedError, + logLevel: log.WarnLevel, + expectedLines: 0, + }, + { + name: "ignored directory", + config: ` +mode: cat +filename: /`, + expectedOutput: "/ is a directory, ignoring it", + logLevel: log.WarnLevel, + expectedLines: 0, + }, + { + name: "glob syntax error", + config: ` +mode: cat +filename: "[*-.log"`, + expectedConfigErr: "Glob failure: syntax error in pattern", + logLevel: log.WarnLevel, + expectedLines: 0, + }, + { + name: "no matching files", + config: ` +mode: cat +filename: /do/not/exist`, + expectedOutput: "No matching files for pattern /do/not/exist", + logLevel: log.WarnLevel, + expectedLines: 0, + }, + { + name: "test.log", + config: ` +mode: cat +filename: test_files/test.log`, + expectedLines: 5, + logLevel: log.WarnLevel, + }, + { + name: "test.log.gz", + config: ` +mode: cat +filename: test_files/test.log.gz`, + expectedLines: 5, + logLevel: log.WarnLevel, + }, + { + name: "unexpected end of gzip stream", + config: ` +mode: cat +filename: test_files/bad.gz`, + expectedErr: "failed to read gz test_files/bad.gz: unexpected EOF", + expectedLines: 0, + logLevel: log.WarnLevel, + }, + { + name: "deleted file", + config: ` +mode: cat +filename: test_files/test_delete.log`, + setup: func() { + f, _ := os.Create("test_files/test_delete.log") + f.Close() + }, + afterConfigure: func() { + os.Remove("test_files/test_delete.log") + }, + expectedErr: "could not stat file test_files/test_delete.log", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.SetLevel(tc.logLevel) + + subLogger := logger.WithFields(log.Fields{ + "type": "file", + }) + + tomb := tomb.Tomb{} + out := make(chan types.Event, 100) + f := fileacquisition.FileSource{} + + if tc.setup != nil { + tc.setup() + } + + err := f.Configure([]byte(tc.config), subLogger) + cstest.RequireErrorContains(t, err, tc.expectedConfigErr) + if tc.expectedConfigErr != "" { + return + } + + if tc.afterConfigure != nil { + tc.afterConfigure() + } + err = f.OneShotAcquisition(out, &tomb) + actualLines := len(out) + cstest.RequireErrorContains(t, err, tc.expectedErr) + + if tc.expectedLines != 0 { + assert.Equal(t, tc.expectedLines, actualLines) + } + + if tc.expectedOutput != "" { + assert.Contains(t, hook.LastEntry().Message, tc.expectedOutput) + hook.Reset() + } + if tc.teardown != nil { + tc.teardown() + } + }) + } +} + +func TestLiveAcquisition(t *testing.T) { + permDeniedFile := "/etc/shadow" + permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" + testPattern := "test_files/*.log" + + if runtime.GOOS == "windows" { + // Technically, this is not a permission denied error, but we just want to test what happens + // if we do not have access to the file + permDeniedFile = `C:\Windows\System32\config\SAM` + permDeniedError = `unable to read C:\Windows\System32\config\SAM : open C:\Windows\System32\config\SAM: The process cannot access the file because it is being used by another process` + testPattern = `test_files\\*.log` // the \ must be escaped for the yaml config + } + + tests := []struct { + name string + config string + expectedErr string + expectedOutput string + expectedLines int + logLevel log.Level + setup func() + afterConfigure func() + teardown func() + }{ + { + config: fmt.Sprintf(` +mode: tail +filename: %s`, permDeniedFile), + expectedOutput: permDeniedError, 
+ logLevel: log.InfoLevel, + expectedLines: 0, + name: "PermissionDenied", + }, + { + config: ` +mode: tail +filename: /`, + expectedOutput: "/ is a directory, ignoring it", + logLevel: log.WarnLevel, + expectedLines: 0, + name: "Directory", + }, + { + config: ` +mode: tail +filename: /do/not/exist`, + expectedOutput: "No matching files for pattern /do/not/exist", + logLevel: log.WarnLevel, + expectedLines: 0, + name: "badPattern", + }, + { + config: fmt.Sprintf(` +mode: tail +filenames: + - %s +force_inotify: true`, testPattern), + expectedLines: 5, + logLevel: log.DebugLevel, + name: "basicGlob", + }, + { + config: fmt.Sprintf(` +mode: tail +filenames: + - %s +force_inotify: true`, testPattern), + expectedLines: 0, + logLevel: log.DebugLevel, + name: "GlobInotify", + afterConfigure: func() { + f, _ := os.Create("test_files/a.log") + f.Close() + time.Sleep(1 * time.Second) + os.Remove("test_files/a.log") + }, + }, + { + config: fmt.Sprintf(` +mode: tail +filenames: + - %s +force_inotify: true`, testPattern), + expectedLines: 5, + logLevel: log.DebugLevel, + name: "GlobInotifyChmod", + afterConfigure: func() { + f, _ := os.Create("test_files/a.log") + f.Close() + time.Sleep(1 * time.Second) + os.Chmod("test_files/a.log", 0o000) + }, + teardown: func() { + os.Chmod("test_files/a.log", 0o644) + os.Remove("test_files/a.log") + }, + }, + { + config: fmt.Sprintf(` +mode: tail +filenames: + - %s +force_inotify: true`, testPattern), + expectedLines: 5, + logLevel: log.DebugLevel, + name: "InotifyMkDir", + afterConfigure: func() { + os.Mkdir("test_files/pouet/", 0o700) + }, + teardown: func() { + os.Remove("test_files/pouet/") + }, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.SetLevel(tc.logLevel) + + subLogger := logger.WithFields(log.Fields{ + "type": "file", + }) + + tomb := tomb.Tomb{} + out := make(chan types.Event) + + f := fileacquisition.FileSource{} + + if tc.setup != nil { + tc.setup() + } + + err := f.Configure([]byte(tc.config), subLogger) + require.NoError(t, err) + + if tc.afterConfigure != nil { + tc.afterConfigure() + } + + actualLines := 0 + if tc.expectedLines != 0 { + go func() { + for { + select { + case <-out: + actualLines++ + case <-time.After(2 * time.Second): + return + } + } + }() + } + + err = f.StreamingAcquisition(out, &tomb) + cstest.RequireErrorContains(t, err, tc.expectedErr) + + if tc.expectedLines != 0 { + fd, err := os.Create("test_files/stream.log") + if err != nil { + t.Fatalf("could not create test file : %s", err) + } + + for i := 0; i < 5; i++ { + _, err = fmt.Fprintf(fd, "%d\n", i) + if err != nil { + t.Fatalf("could not write test file : %s", err) + os.Remove("test_files/stream.log") + } + } + + fd.Close() + // we sleep to make sure we detect the new file + time.Sleep(1 * time.Second) + os.Remove("test_files/stream.log") + assert.Equal(t, tc.expectedLines, actualLines) + } + + if tc.expectedOutput != "" { + if hook.LastEntry() == nil { + t.Fatalf("expected output %s, but got nothing", tc.expectedOutput) + } + + assert.Contains(t, hook.LastEntry().Message, tc.expectedOutput) + hook.Reset() + } + + if tc.teardown != nil { + tc.teardown() + } + + tomb.Kill(nil) + }) + } +} + +func TestExclusion(t *testing.T) { + config := `filenames: ["test_files/*.log*"] +exclude_regexps: ["\\.gz$"]` + logger, hook := test.NewNullLogger() + // logger.SetLevel(ts.logLevel) + subLogger := logger.WithFields(log.Fields{ + "type": "file", + }) + + f := fileacquisition.FileSource{} + if err 
:= f.Configure([]byte(config), subLogger); err != nil { + subLogger.Fatalf("unexpected error: %s", err) + } + + expectedLogOutput := "Skipping file test_files/test.log.gz as it matches exclude pattern" + + if runtime.GOOS == "windows" { + expectedLogOutput = `Skipping file test_files\test.log.gz as it matches exclude pattern \.gz` + } + + if hook.LastEntry() == nil { + t.Fatalf("expected output %s, but got nothing", expectedLogOutput) + } + + assert.Contains(t, hook.LastEntry().Message, expectedLogOutput) + hook.Reset() +} diff --git a/pkg/acquisition/modules/file/tailline.go b/pkg/acquisition/modules/file/tailline.go new file mode 100644 index 0000000..ac377b6 --- /dev/null +++ b/pkg/acquisition/modules/file/tailline.go @@ -0,0 +1,7 @@ +// +build linux freebsd netbsd openbsd solaris !windows + +package fileacquisition + +func trimLine(text string) string { + return text +} diff --git a/pkg/acquisition/modules/file/tailline_windows.go b/pkg/acquisition/modules/file/tailline_windows.go new file mode 100644 index 0000000..0c853c6 --- /dev/null +++ b/pkg/acquisition/modules/file/tailline_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package fileacquisition + +import "strings" + +func trimLine(text string) string { + return strings.TrimRight(text, "\r") +} diff --git a/pkg/acquisition/modules/file/test_files/bad.gz b/pkg/acquisition/modules/file/test_files/bad.gz new file mode 100644 index 0000000..d81cc07 --- /dev/null +++ b/pkg/acquisition/modules/file/test_files/bad.gz @@ -0,0 +1 @@ +42 diff --git a/pkg/acquisition/modules/file/test_files/test.log b/pkg/acquisition/modules/file/test_files/test.log new file mode 100644 index 0000000..8a1218a --- /dev/null +++ b/pkg/acquisition/modules/file/test_files/test.log @@ -0,0 +1,5 @@ +1 +2 +3 +4 +5 diff --git a/pkg/acquisition/modules/file/test_files/test.log.gz b/pkg/acquisition/modules/file/test_files/test.log.gz new file mode 100644 index 0000000000000000000000000000000000000000..996579e210ecfec5abb123dd6f28c9d936f06f79 GIT binary patch literal 39 vcmb2|=HU3ad2#{+b4hA(iC#{AI)m{Oqen*1j2;?2Wn$=BlbOZEz`y_iC6NvX literal 0 HcmV?d00001 diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go new file mode 100644 index 0000000..6745637 --- /dev/null +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -0,0 +1,264 @@ +package journalctlacquisition + +import ( + "bufio" + "context" + "fmt" + "net/url" + "os/exec" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +type JournalCtlConfiguration struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + Filters []string `yaml:"journalctl_filter"` +} + +type JournalCtlSource struct { + config JournalCtlConfiguration + logger *log.Entry + src string + args []string +} + +const journalctlCmd string = "journalctl" + +var ( + journalctlArgsOneShot = []string{} + journalctlArgstreaming = []string{"--follow", "-n", "0"} +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_journalctlsource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error { + for scanner.Scan() { + txt := scanner.Text() + out <- txt + } 
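+ // once Scan returns false, forward any scanner error and close errChan + // so runJournalCtl knows no more input is coming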
+ if errChan != nil && scanner.Err() != nil { + errChan <- scanner.Err() + close(errChan) + // the error is already consumed by runJournalCtl + return nil //nolint:nilerr + } + if errChan != nil { + close(errChan) + } + return nil +} + +func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) error { + ctx, cancel := context.WithCancel(context.Background()) + + cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) + stdout, err := cmd.StdoutPipe() + if err != nil { + cancel() + return fmt.Errorf("could not get journalctl stdout: %s", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + cancel() + return fmt.Errorf("could not get journalctl stderr: %s", err) + } + + stderrChan := make(chan string) + stdoutChan := make(chan string) + errChan := make(chan error, 1) + + logger := j.logger.WithField("src", j.src) + + logger.Infof("Running journalctl command: %s %s", cmd.Path, cmd.Args) + err = cmd.Start() + if err != nil { + cancel() + logger.Errorf("could not start journalctl command : %s", err) + return err + } + + stdoutscanner := bufio.NewScanner(stdout) + + if stdoutscanner == nil { + cancel() + cmd.Wait() + return fmt.Errorf("failed to create stdout scanner") + } + + stderrScanner := bufio.NewScanner(stderr) + + if stderrScanner == nil { + cancel() + cmd.Wait() + return fmt.Errorf("failed to create stderr scanner") + } + t.Go(func() error { + return readLine(stdoutscanner, stdoutChan, errChan) + }) + t.Go(func() error { + //looks like journalctl closes stderr quite early, so ignore its status (but not its output) + return readLine(stderrScanner, stderrChan, nil) + }) + + for { + select { + case <-t.Dying(): + logger.Infof("journalctl datasource %s stopping", j.src) + cancel() + cmd.Wait() //avoid zombie process + return nil + case stdoutLine := <-stdoutChan: + l := types.Line{} + l.Raw = stdoutLine + logger.Debugf("getting one line : %s", l.Raw) + l.Labels = j.config.Labels + l.Time = time.Now().UTC() + l.Src = j.src + l.Process = true + l.Module = j.GetName() + linesRead.With(prometheus.Labels{"source": j.src}).Inc() + var evt types.Event + if !j.config.UseTimeMachine { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} + } else { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + out <- evt + case stderrLine := <-stderrChan: + logger.Warnf("Got stderr message : %s", stderrLine) + err := fmt.Errorf("journalctl error : %s", stderrLine) + t.Kill(err) + case errScanner, ok := <-errChan: + if !ok { + logger.Debugf("errChan is closed, quitting") + t.Kill(nil) + } + if errScanner != nil { + t.Kill(errScanner) + } + } + } +} + +func (j *JournalCtlSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (j *JournalCtlSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry) error { + config := JournalCtlConfiguration{} + j.logger = logger + err := yaml.UnmarshalStrict(yamlConfig, &config) + if err != nil { + return errors.Wrap(err, "Cannot parse JournalCtlSource configuration") + } + if config.Mode == "" { + config.Mode = configuration.TAIL_MODE + } + var args []string + if config.Mode == configuration.TAIL_MODE { + args = journalctlArgstreaming + } else { + args = journalctlArgsOneShot + } + if len(config.Filters) == 0 { + return fmt.Errorf("journalctl_filter is required") + } + j.args = append(args, config.Filters...) 
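+ // derive a stable source label from the filters so log lines and + // metrics can be attributed to this specific journalctl datasource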
+ j.src = fmt.Sprintf("journalctl-%s", strings.Join(config.Filters, ".")) + j.config = config + return nil +} + +func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + j.logger = logger + j.config = JournalCtlConfiguration{} + j.config.Mode = configuration.CAT_MODE + j.config.Labels = labels + + //format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 + if !strings.HasPrefix(dsn, "journalctl://") { + return fmt.Errorf("invalid DSN %s for journalctl source, must start with journalctl://", dsn) + } + + qs := strings.TrimPrefix(dsn, "journalctl://") + if len(qs) == 0 { + return fmt.Errorf("empty journalctl:// DSN") + } + + params, err := url.ParseQuery(qs) + if err != nil { + return fmt.Errorf("could not parse journalctl DSN : %s", err) + } + for key, value := range params { + switch key { + case "filters": + j.config.Filters = append(j.config.Filters, value...) + case "log_level": + if len(value) != 1 { + return fmt.Errorf("expected zero or one value for 'log_level'") + } + lvl, err := log.ParseLevel(value[0]) + if err != nil { + return errors.Wrapf(err, "unknown level %s", value[0]) + } + j.logger.Logger.SetLevel(lvl) + case "since": + j.args = append(j.args, "--since", value[0]) + default: + return fmt.Errorf("unsupported key %s in journalctl DSN", key) + } + } + j.args = append(j.args, j.config.Filters...) + return nil +} + +func (j *JournalCtlSource) GetMode() string { + return j.config.Mode +} + +func (j *JournalCtlSource) GetName() string { + return "journalctl" +} + +func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + defer types.CatchPanic("crowdsec/acquis/journalctl/oneshot") + err := j.runJournalCtl(out, t) + j.logger.Debug("Oneshot journalctl acquisition is done") + return err + +} + +func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/journalctl/streaming") + return j.runJournalCtl(out, t) + }) + return nil +} +func (j *JournalCtlSource) CanRun() error { + //TODO: add a more precise check on version or something ? 
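Stepping back to ConfigureByDSN above: the journalctl:// form lets filters, log_level and since be combined on the command line. A sketch (all values illustrative, same package assumption as before):

j := JournalCtlSource{}
err := j.ConfigureByDSN(
	"journalctl://filters=_SYSTEMD_UNIT=ssh.service&log_level=info&since=yesterday",
	map[string]string{"type": "syslog"},
	log.WithField("type", "journalctl"),
)
// on success, j.args ends up as: --since yesterday _SYSTEMD_UNIT=ssh.service
_ = err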
+ _, err := exec.LookPath(journalctlCmd) + return err +} +func (j *JournalCtlSource) Dump() interface{} { + return j +} diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go new file mode 100644 index 0000000..0428094 --- /dev/null +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -0,0 +1,273 @@ +package journalctlacquisition + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" +) + +func TestBadConfiguration(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + expectedErr string + }{ + { + config: `foobar: asd.log`, + expectedErr: "line 1: field foobar not found in type journalctlacquisition.JournalCtlConfiguration", + }, + { + config: ` +mode: tail +source: journalctl`, + expectedErr: "journalctl_filter is required", + }, + { + config: ` +mode: cat +source: journalctl +journalctl_filter: + - _UID=42`, + expectedErr: "", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "journalctl", + }) + for _, test := range tests { + f := JournalCtlSource{} + err := f.Configure([]byte(test.config), subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestConfigureDSN(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + dsn string + expectedErr string + }{ + { + dsn: "asd://", + expectedErr: "invalid DSN asd:// for journalctl source, must start with journalctl://", + }, + { + dsn: "journalctl://", + expectedErr: "empty journalctl:// DSN", + }, + { + dsn: "journalctl://foobar=42", + expectedErr: "unsupported key foobar in journalctl DSN", + }, + { + dsn: "journalctl://filters=%ZZ", + expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"", + }, + { + dsn: "journalctl://filters=_UID=42?log_level=warn", + expectedErr: "", + }, + { + dsn: "journalctl://filters=_UID=1000&log_level=foobar", + expectedErr: "unknown level foobar: not a valid logrus Level:", + }, + { + dsn: "journalctl://filters=_UID=1000&log_level=warn&since=yesterday", + expectedErr: "", + }, + } + subLogger := log.WithFields(log.Fields{ + "type": "journalctl", + }) + for _, test := range tests { + f := JournalCtlSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestOneShot(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + expectedErr string + expectedOutput string + expectedLines int + logLevel log.Level + }{ + { + config: ` +source: journalctl +mode: cat +journalctl_filter: + - "-_UID=42"`, + expectedErr: "", + expectedOutput: "journalctl: invalid option", + logLevel: log.WarnLevel, + expectedLines: 0, + }, + { + config: ` +source: journalctl +mode: cat +journalctl_filter: + - _SYSTEMD_UNIT=ssh.service`, + expectedErr: "", + expectedOutput: "", + logLevel: log.WarnLevel, + expectedLines: 14, + }, + } + for _, ts := range tests { + var logger *log.Logger + var subLogger *log.Entry + var hook *test.Hook + if ts.expectedOutput != "" { + logger, hook = test.NewNullLogger() + logger.SetLevel(ts.logLevel) + subLogger = 
logger.WithFields(log.Fields{ + "type": "journalctl", + }) + } else { + subLogger = log.WithFields(log.Fields{ + "type": "journalctl", + }) + } + tomb := tomb.Tomb{} + out := make(chan types.Event, 100) + j := JournalCtlSource{} + err := j.Configure([]byte(ts.config), subLogger) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + err = j.OneShotAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + if err != nil { + continue + } + + if ts.expectedLines != 0 { + assert.Equal(t, ts.expectedLines, len(out)) + } + + if ts.expectedOutput != "" { + if hook.LastEntry() == nil { + t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput) + } + assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput) + hook.Reset() + } + } +} + +func TestStreaming(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + expectedErr string + expectedOutput string + expectedLines int + logLevel log.Level + }{ + { + config: ` +source: journalctl +mode: cat +journalctl_filter: + - _SYSTEMD_UNIT=ssh.service`, + expectedErr: "", + expectedOutput: "", + logLevel: log.WarnLevel, + expectedLines: 14, + }, + } + for _, ts := range tests { + var logger *log.Logger + var subLogger *log.Entry + var hook *test.Hook + if ts.expectedOutput != "" { + logger, hook = test.NewNullLogger() + logger.SetLevel(ts.logLevel) + subLogger = logger.WithFields(log.Fields{ + "type": "journalctl", + }) + } else { + subLogger = log.WithFields(log.Fields{ + "type": "journalctl", + }) + } + tomb := tomb.Tomb{} + out := make(chan types.Event) + j := JournalCtlSource{} + err := j.Configure([]byte(ts.config), subLogger) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + actualLines := 0 + if ts.expectedLines != 0 { + go func() { + READLOOP: + for { + select { + case <-out: + actualLines++ + case <-time.After(1 * time.Second): + break READLOOP + } + } + }() + } + + err = j.StreamingAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + if err != nil { + continue + } + + if ts.expectedLines != 0 { + time.Sleep(1 * time.Second) + assert.Equal(t, ts.expectedLines, actualLines) + } + tomb.Kill(nil) + tomb.Wait() + output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput() + if string(output) != "" { + t.Fatalf("Found a journalctl process after killing the tomb !") + } + if ts.expectedOutput != "" { + if hook.LastEntry() == nil { + t.Fatalf("Expected log output '%s' but got nothing !", ts.expectedOutput) + } + assert.Contains(t, hook.LastEntry().Message, ts.expectedOutput) + hook.Reset() + } + } +} + +func TestMain(m *testing.M) { + if os.Getenv("USE_SYSTEM_JOURNALCTL") == "" { + currentDir, _ := os.Getwd() + fullPath := filepath.Join(currentDir, "test_files") + os.Setenv("PATH", fullPath+":"+os.Getenv("PATH")) + } + os.Exit(m.Run()) +} diff --git a/pkg/acquisition/modules/journalctl/test_files/journalctl b/pkg/acquisition/modules/journalctl/test_files/journalctl new file mode 100755 index 0000000..83c9ad1 --- /dev/null +++ b/pkg/acquisition/modules/journalctl/test_files/journalctl @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +import argparse +import time +import sys + +class CustomParser(argparse.ArgumentParser): + #small hack to make argparse errors the same as journalctl + def error(self, message): + if 'unrecognized arguments:' in message: + sys.stderr.write("journalctl: invalid option -- '_'\n") + sys.stderr.flush() + exit(1) + else: + sys.stderr.write(message) + 
sys.stderr.flush() + exit(1) + +LOGS = """-- Logs begin at Fri 2019-07-26 17:13:13 CEST, end at Mon 2020-11-23 09:17:34 CET. -- +Nov 22 11:22:19 zeroed sshd[1480]: Invalid user wqeqwe from 127.0.0.1 port 55818 +Nov 22 11:22:23 zeroed sshd[1480]: Failed password for invalid user wqeqwe from 127.0.0.1 port 55818 ssh2 +Nov 22 11:23:22 zeroed sshd[1769]: Invalid user wqeqwe1 from 127.0.0.1 port 55824 +Nov 22 11:23:24 zeroed sshd[1769]: Disconnecting invalid user wqeqwe1 127.0.0.1 port 55824: Too many authentication failures [preauth] +Nov 22 11:23:24 zeroed sshd[1777]: Invalid user wqeqwe2 from 127.0.0.1 port 55826 +Nov 22 11:23:25 zeroed sshd[1777]: Disconnecting invalid user wqeqwe2 127.0.0.1 port 55826: Too many authentication failures [preauth] +Nov 22 11:23:25 zeroed sshd[1780]: Invalid user wqeqwe3 from 127.0.0.1 port 55828 +Nov 22 11:23:26 zeroed sshd[1780]: Disconnecting invalid user wqeqwe3 127.0.0.1 port 55828: Too many authentication failures [preauth] +Nov 22 11:23:26 zeroed sshd[1786]: Invalid user wqeqwe4 from 127.0.0.1 port 55830 +Nov 22 11:23:27 zeroed sshd[1786]: Failed password for invalid user wqeqwe4 from 127.0.0.1 port 55830 ssh2 +Nov 22 11:23:27 zeroed sshd[1786]: Disconnecting invalid user wqeqwe4 127.0.0.1 port 55830: Too many authentication failures [preauth] +Nov 22 11:23:27 zeroed sshd[1791]: Invalid user wqeqwe5 from 127.0.0.1 port 55834 +Nov 22 11:23:27 zeroed sshd[1791]: Failed password for invalid user wqeqwe5 from 127.0.0.1 port 55834 ssh2""" + +parser = CustomParser() +parser.add_argument('filter', metavar='FILTER', type=str, nargs='?') +parser.add_argument('-n', dest='n', type=int) +parser.add_argument('--follow', dest='follow', action='store_true', default=False) + +args = parser.parse_args() + +for line in LOGS.split('\n'): + print(line) + +if args.follow: + time.sleep(9999) \ No newline at end of file diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go new file mode 100644 index 0000000..b8b8937 --- /dev/null +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -0,0 +1,242 @@ +package kafkaacquisition + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/segmentio/kafka-go" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +var ( + dataSourceName = "kafka" +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_kafkasource_hits_total", + Help: "Total lines that were read from topic", + }, + []string{"topic"}) + +type KafkaConfiguration struct { + Brokers []string `yaml:"brokers"` + Topic string `yaml:"topic"` + GroupID string `yaml:"group_id"` + Timeout string `yaml:"timeout"` + TLS *TLSConfig `yaml:"tls"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type TLSConfig struct { + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + ClientCert string `yaml:"client_cert"` + ClientKey string `yaml:"client_key"` + CaCert string `yaml:"ca_cert"` +} + +type KafkaSource struct { + Config KafkaConfiguration + logger *log.Entry + Reader *kafka.Reader +} + +func (k *KafkaSource) Configure(Config []byte, logger *log.Entry) error { + var err error + + k.Config = KafkaConfiguration{} + k.logger = logger + err = yaml.UnmarshalStrict(Config, &k.Config) + if err 
!= nil { + return errors.Wrapf(err, "cannot parse %s datasource configuration", dataSourceName) + } + if len(k.Config.Brokers) == 0 { + return fmt.Errorf("cannot create a %s reader with an empty list of broker addresses", dataSourceName) + } + if k.Config.Topic == "" { + return fmt.Errorf("cannot create a %s reader with an empty topic", dataSourceName) + } + if k.Config.Mode == "" { + k.Config.Mode = configuration.TAIL_MODE + } + dialer, err := k.Config.NewDialer() + if err != nil { + return errors.Wrapf(err, "cannot create %s dialer", dataSourceName) + } + k.Reader, err = k.Config.NewReader(dialer) + if err != nil { + return errors.Wrapf(err, "cannot create %s reader", dataSourceName) + } + if k.Reader == nil { + return fmt.Errorf("cannot create %s reader", dataSourceName) + } + return nil +} + +func (k *KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry) error { + return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName) +} + +func (k *KafkaSource) GetMode() string { + return k.Config.Mode +} + +func (k *KafkaSource) GetName() string { + return dataSourceName +} + +func (k *KafkaSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) +} + +func (k *KafkaSource) CanRun() error { + return nil +} + +func (k *KafkaSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (k *KafkaSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (k *KafkaSource) Dump() interface{} { + return k +} + +func (k *KafkaSource) ReadMessage(out chan types.Event) error { + // Start processing from latest Offset + k.Reader.SetOffsetAt(context.Background(), time.Now()) + for { + m, err := k.Reader.ReadMessage(context.Background()) + if err != nil { + if err == io.EOF { + return nil + } + k.logger.Errorln(errors.Wrapf(err, "while reading %s message", dataSourceName)) + } + l := types.Line{ + Raw: string(m.Value), + Labels: k.Config.Labels, + Time: m.Time.UTC(), + Src: k.Config.Topic, + Process: true, + Module: k.GetName(), + } + linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() + var evt types.Event + + if !k.Config.UseTimeMachine { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leakybucket.LIVE} + } else { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leakybucket.TIMEMACHINE} + } + out <- evt + } +} + +func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + return k.ReadMessage(out) + }) + //nolint //fp + for { + select { + case <-t.Dying(): + k.logger.Infof("%s datasource topic %s stopping", dataSourceName, k.Config.Topic) + if err := k.Reader.Close(); err != nil { + return errors.Wrapf(err, "while closing %s reader on topic '%s'", dataSourceName, k.Config.Topic) + } + return nil + } + } +} + +func (k *KafkaSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + k.logger.Infof("start reader on topic '%s'", k.Config.Topic) + + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/kafka/live") + return k.RunReader(out, t) + }) + + return nil +} + +func (kc *KafkaConfiguration) NewTLSConfig() (*tls.Config, error) { + tlsConfig := tls.Config{ + InsecureSkipVerify: kc.TLS.InsecureSkipVerify, + } + + cert, err := tls.LoadX509KeyPair(kc.TLS.ClientCert, kc.TLS.ClientKey) + if err != nil { + return &tlsConfig, err + } +
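NewTLSConfig, continued below, loads a client certificate and a CA bundle to enable mutual TLS toward the brokers. The corresponding datasource YAML carries a tls block; a sketch using the sample files shipped under testdata (the same paths the SSL test further down relies on):

k := KafkaSource{}
err := k.Configure([]byte(`
source: kafka
brokers:
  - localhost:9093
topic: crowdsecssl
tls:
  insecure_skip_verify: true
  client_cert: ./testdata/kafkaClient.certificate.pem
  client_key: ./testdata/kafkaClient.key
  ca_cert: ./testdata/snakeoil-ca-1.crt`), log.WithField("type", "kafka"))
_ = err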
tlsConfig.Certificates = []tls.Certificate{cert} + + caCert, err := os.ReadFile(kc.TLS.CaCert) + if err != nil { + return &tlsConfig, err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool + + tlsConfig.BuildNameToCertificate() + return &tlsConfig, err +} + +func (kc *KafkaConfiguration) NewDialer() (*kafka.Dialer, error) { + dialer := &kafka.Dialer{} + var timeoutDuration time.Duration + timeoutDuration = time.Duration(10) * time.Second + if kc.Timeout != "" { + intTimeout, err := strconv.Atoi(kc.Timeout) + if err != nil { + return dialer, err + } + timeoutDuration = time.Duration(intTimeout) * time.Second + } + dialer = &kafka.Dialer{ + Timeout: timeoutDuration, + DualStack: true, + } + + if kc.TLS != nil { + tlsConfig, err := kc.NewTLSConfig() + if err != nil { + return dialer, err + } + dialer.TLS = tlsConfig + } + return dialer, nil +} + +func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer) (*kafka.Reader, error) { + rConf := kafka.ReaderConfig{ + Brokers: kc.Brokers, + Topic: kc.Topic, + Dialer: dialer, + } + if kc.GroupID != "" { + rConf.GroupID = kc.GroupID + } + if err := rConf.Validate(); err != nil { + return &kafka.Reader{}, errors.Wrapf(err, "while validating reader configuration") + } + return kafka.NewReader(rConf), nil +} diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go new file mode 100644 index 0000000..8b229b6 --- /dev/null +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -0,0 +1,262 @@ +package kafkaacquisition + +import ( + "context" + "net" + "runtime" + "strconv" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/segmentio/kafka-go" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gotest.tools/v3/assert" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + config string + expectedErr string + }{ + { + config: ` +foobar: bla +source: kafka`, + expectedErr: "line 2: field foobar not found in type kafkaacquisition.KafkaConfiguration", + }, + { + config: `source: kafka`, + expectedErr: "cannot create a kafka reader with an empty list of broker addresses", + }, + { + config: ` +source: kafka +brokers: + - bla +timeout: 5`, + expectedErr: "cannot create a kafka reader with an empty topic", + }, + { + config: ` +source: kafka +brokers: + - bla +topic: toto +timeout: aa`, + expectedErr: "cannot create kafka dialer: strconv.Atoi: parsing \"aa\": invalid syntax", + }, + { + config: ` +source: kafka +brokers: + - localhost:9092 +topic: crowdsec`, + expectedErr: "", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "kafka", + }) + for _, test := range tests { + k := KafkaSource{} + err := k.Configure([]byte(test.config), subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func writeToKafka(w *kafka.Writer, logs []string) { + + for idx, log := range logs { + err := w.WriteMessages(context.Background(), kafka.Message{ + Key: []byte(strconv.Itoa(idx)), + // create an arbitrary message payload for the value + Value: []byte(log), + }) + if err != nil { + panic("could not write message " + err.Error()) + } + } +} + +func createTopic(topic string, broker string) { + conn, err := kafka.Dial("tcp", broker) + if err != nil { + panic(err.Error()) + } + defer conn.Close() + + controller, err := conn.Controller() + if err != nil { + panic(err.Error()) + } + var controllerConn *kafka.Conn + controllerConn, err =
kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) + if err != nil { + panic(err.Error()) + } + defer controllerConn.Close() + + topicConfigs := []kafka.TopicConfig{ + { + Topic: topic, + NumPartitions: 1, + ReplicationFactor: 1, + }, + } + + err = controllerConn.CreateTopics(topicConfigs...) + if err != nil { + panic(err.Error()) + } +} + +func TestStreamingAcquisition(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + name string + logs []string + expectedLines int + expectedErr string + }{ + { + name: "valid msgs", + logs: []string{ + "message 1", + "message 2", + "message 3", + }, + expectedLines: 3, + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "kafka", + }) + + createTopic("crowdsecplaintext", "localhost:9092") + + w := kafka.NewWriter(kafka.WriterConfig{ + Brokers: []string{"localhost:9092"}, + Topic: "crowdsecplaintext", + }) + if w == nil { + log.Fatalf("Unable to setup a kafka producer") + } + + for _, ts := range tests { + ts := ts + t.Run(ts.name, func(t *testing.T) { + k := KafkaSource{} + err := k.Configure([]byte(` +source: kafka +brokers: + - localhost:9092 +topic: crowdsecplaintext`), subLogger) + if err != nil { + t.Fatalf("could not configure kafka source : %s", err) + } + tomb := tomb.Tomb{} + out := make(chan types.Event) + err = k.StreamingAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + + actualLines := 0 + go writeToKafka(w, ts.logs) + READLOOP: + for { + select { + case <-out: + actualLines++ + case <-time.After(2 * time.Second): + break READLOOP + } + } + assert.Equal(t, ts.expectedLines, actualLines) + tomb.Kill(nil) + tomb.Wait() + }) + } + +} + +func TestStreamingAcquisitionWithSSL(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + name string + logs []string + expectedLines int + expectedErr string + }{ + { + name: "valid msgs", + logs: []string{ + "message 1", + "message 2", + }, + expectedLines: 2, + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "kafka", + }) + + createTopic("crowdsecssl", "localhost:9092") + + w2 := kafka.NewWriter(kafka.WriterConfig{ + Brokers: []string{"localhost:9092"}, + Topic: "crowdsecssl", + }) + if w2 == nil { + log.Fatalf("Unable to setup a kafka producer") + } + + for _, ts := range tests { + ts := ts + t.Run(ts.name, func(t *testing.T) { + k := KafkaSource{} + err := k.Configure([]byte(` +source: kafka +brokers: + - localhost:9093 +topic: crowdsecssl +tls: + insecure_skip_verify: true + client_cert: ./testdata/kafkaClient.certificate.pem + client_key: ./testdata/kafkaClient.key + ca_cert: ./testdata/snakeoil-ca-1.crt + `), subLogger) + if err != nil { + t.Fatalf("could not configure kafka source : %s", err) + } + tomb := tomb.Tomb{} + out := make(chan types.Event) + err = k.StreamingAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + + actualLines := 0 + go writeToKafka(w2, ts.logs) + READLOOP: + for { + select { + case <-out: + actualLines++ + case <-time.After(2 * time.Second): + break READLOOP + } + } + assert.Equal(t, ts.expectedLines, actualLines) + tomb.Kill(nil) + tomb.Wait() + }) + } + +} diff --git a/pkg/acquisition/modules/kafka/testdata/kafkaClient.certificate.pem b/pkg/acquisition/modules/kafka/testdata/kafkaClient.certificate.pem new file mode 100644 index 0000000..b8e491d --- /dev/null +++ b/pkg/acquisition/modules/kafka/testdata/kafkaClient.certificate.pem @@ -0,0 +1,23 
@@ +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUZ3H0cvKHzTXDByikP2PLvhqCTSwwDQYJKoZIhvcNAQEL +BQAwbzEZMBcGA1UEAwwQQ3Jvd2RTZWMgVGVzdCBDQTERMA8GA1UECwwIQ3Jvd2Rz +ZWMxETAPBgNVBAoMCENyb3dkc2VjMQ4wDAYDVQQHDAVQYXJpczEPMA0GA1UECAwG +RnJhbmNlMQswCQYDVQQGEwJGUjAeFw0yMjA4MDExNjA5NDJaFw00OTEyMTYxNjA5 +NDJaMGYxCzAJBgNVBAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBh +cmlzMREwDwYDVQQKEwhDcm93ZHNlYzENMAsGA1UECxMEVEVTVDEUMBIGA1UEAxML +a2Fma2FDbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCG5R6r +xi9FcL6p8bD5+bpV3bTDNwRTF4b9psrhVY8MhvjvaoYODHvENJaDb3Z/ipDUdG6e +zjgigfjLRRwxvj2+E0nTn/TsfRQIlH2BYPEzCCrG33WKkcmG1K3LEbkXGyBcPljd +DPHb2nbZERDFBcIlqNM5N9+cbLFQnJNw3u7Nsv/e4jjUpAeYg30YVKwrr9P4mj8L +NR+ZALe0+2NUJpTYX0ZP4vPeYqTGWPshMrGHLrChhYnaYWzEvYITjVtzkHk9xtFg +uRGjgtwlpf0m2EM8GHhteHaLb2efU1C860QaFkTBK1JeGU1A2O6O8lOo8CMVG6h+ +IBA1kspGRO0wmix5AgMBAAGjZTBjMCEGA1UdEQQaMBiCC2thZmthQ2xpZW50ggls +b2NhbGhvc3QwHQYDVR0OBBYEFD/QYQ2ppLhrC8qnfSjEsppvI/NPMB8GA1UdIwQY +MBaAFCCtzZtp2uUwxDCvIf8ETMpCtLxzMA0GCSqGSIb3DQEBCwUAA4IBAQA7Bly4 +t1ob95om3h+d9tYOuqzERUhO9BZXjqGFmOxb4pmpg5ANa9j82VOy0PWvBPR4M6N5 +uHwUKj6S4HWDLpabNNsBWYUzILBBQDqkiKgy0NmakZjv2fbFSIEpZF8sfyL3Z/ci +JRo6SqZWILh7B2BqysLmgTJeRFode3zqIKhLPIqYqEDBCwgSL1quX0afut2q86lx +x2RJB/N8QsNfXSjTOojXY3cJzLdW4XKGZKk75YhlpYt+v5235paVbocz32diQczk +9yCqfJfG8BBNA6WdPgtLhQHiDLO7UY+Y+jGIe2G41w7adT/b2Omeb2h3RbGeqKx9 +WteVlQb955ItDXKI +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/kafka/testdata/kafkaClient.key b/pkg/acquisition/modules/kafka/testdata/kafkaClient.key new file mode 100644 index 0000000..23059ef --- /dev/null +++ b/pkg/acquisition/modules/kafka/testdata/kafkaClient.key @@ -0,0 +1,32 @@ +Bag Attributes + friendlyName: kafkaclient + localKeyID: 54 69 6D 65 20 31 36 35 39 33 37 30 31 38 36 30 33 36 +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCG5R6rxi9FcL6p +8bD5+bpV3bTDNwRTF4b9psrhVY8MhvjvaoYODHvENJaDb3Z/ipDUdG6ezjgigfjL +RRwxvj2+E0nTn/TsfRQIlH2BYPEzCCrG33WKkcmG1K3LEbkXGyBcPljdDPHb2nbZ +ERDFBcIlqNM5N9+cbLFQnJNw3u7Nsv/e4jjUpAeYg30YVKwrr9P4mj8LNR+ZALe0 ++2NUJpTYX0ZP4vPeYqTGWPshMrGHLrChhYnaYWzEvYITjVtzkHk9xtFguRGjgtwl +pf0m2EM8GHhteHaLb2efU1C860QaFkTBK1JeGU1A2O6O8lOo8CMVG6h+IBA1kspG +RO0wmix5AgMBAAECggEAYLrdwomMDjxpzI2OvcJQ1w/zdmT2Ses+FpfLNchXmsES +swPs+xgCjFC1eaytCYpAjsirJl90K3KOCJ0XOahUt/822nUCea67deedE/CDJXf+ +zLsim2otW+0YbtzXn/UIwHzI1kJZELFYthEhuFaHwN+OD6K8S3w5rjeJFtAV6BQc +AzMAwQ+6j8DG/V+5mgp4YCrcGXmJzvdXqJdiBmFwoEOAdp/ahTjMscmyI0ZQdMpF +t8e8x5WDVT65ScUA6nvKkSttYhJ5qpEWiMerR8rBJbkbNi529fUW7/sX1xTMYI5G +psOxGdXGSyH2i365DtFxHhtovSc+TgWpNJgZDcJLLQKBgQDdWi4sJO985m1hps7m +bKiXLJE/gbSYQekIlt4/KGbY01BrKSSvgsVTIZbC/m8GhAG48uLG1y51R30cGx6g +Wg8616duqnq+P7pvw+hWsGrtzYh4URx6T91SJi313Xi5ouqLsNiMznPoLXEnDgJv +xO17TkCnThU/Ms6ml1PFeccAYwKBgQCcAoFVqibXRyFoD2UVMpgekOQ+QxBZXItm +RoiBhimEahhx6DjJ9GPmAWUTJfAMom0TNoYa/ziM/+VdNruUMRUvAqBHV2vqllYE +Szhfxlh0RyCiZzrgEqZLVMdr0vxbeA4e5D2+26NH0YHGqCVacdX5659bSM5hcP/s +WO/fGIcAcwKBgQCIKv3UcjRRZX9MX01QOu/Lb8M6ihQKxiERA55cxAHgyL3j7j9/ +KLcy2Krh8ZtjKrnUiLYxFBakVwID1DsW8dAEpr19Oqqfdpy+PIolKgndmF6nhV47 +b/36lzoW0dN+f1ZB8NyGYkqzPaEqIVgmYcKl5BGp2kL/ycWOffEuvidJeQKBgHls +eb1y8Ugc1KNpWANnnX4sx3iuOctTfCveOeCVyzqEWQJO++Qzko0yCLkejfdHdB3A +EiBxBFK52Ir0TorIqPQt1xGvuQ6cc9ZjtTzV44Kc2YmNTwWXflajZZNGY6PNjS/9 +9RDXYf5D0f4MYQZEE4axHRavU/IDQS1zCz9Yl7qBAoGAezrd5EziH9VNHMzPnfkH ++Lg2DCrRbyG1pHpuKJg2i98Ulwuu/9A5m5Vj1iYrANt9v4ycWimLikyd5vJXW60V +9PBb8FB/vjpXNJ1PZBGjlxgpWpzF13JGcJpFBK+z5yCPevzJlc/H+IAQbd3mS3WW +DDwAGG2L41aLYKmsjAtr76I= +-----END PRIVATE KEY----- diff --git 
a/pkg/acquisition/modules/kafka/testdata/snakeoil-ca-1.crt b/pkg/acquisition/modules/kafka/testdata/snakeoil-ca-1.crt new file mode 100644 index 0000000..1205eaf --- /dev/null +++ b/pkg/acquisition/modules/kafka/testdata/snakeoil-ca-1.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDvzCCAqegAwIBAgIUSc+OZ8EjaDgzm0sjqlHVXjZ0od0wDQYJKoZIhvcNAQEL +BQAwbzEZMBcGA1UEAwwQQ3Jvd2RTZWMgVGVzdCBDQTERMA8GA1UECwwIQ3Jvd2Rz +ZWMxETAPBgNVBAoMCENyb3dkc2VjMQ4wDAYDVQQHDAVQYXJpczEPMA0GA1UECAwG +RnJhbmNlMQswCQYDVQQGEwJGUjAeFw0yMjA4MDExNjA5MzZaFw0yMzA4MDExNjA5 +MzZaMG8xGTAXBgNVBAMMEENyb3dkU2VjIFRlc3QgQ0ExETAPBgNVBAsMCENyb3dk +c2VjMREwDwYDVQQKDAhDcm93ZHNlYzEOMAwGA1UEBwwFUGFyaXMxDzANBgNVBAgM +BkZyYW5jZTELMAkGA1UEBhMCRlIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDAzj1gJzEhzcymL9dZX6+dTLdi9RDoII7PWtYCIoY5tqvewOzVBZMEDEhG +az8Btwo6Edr7u804Zule9ZVSaTkmse+thNthukXrtmTEuOuienym5KkVddNckTtr +w/5MLMkKK0Ux45BYW4H3wT1HpD56ezCUhxL5O3ACPjufw7yvMheHRnQxe3txmlNq +rd9swZH0sdZovWmW2Fj+C5qYbP/6hLzii9SNWOzOnKxlbw8CMBzK7KZgWp4qi4sz +tFCkGh2+Ya2QV3+q9Z6fD3hTZJfELEbDgP7ULYrvlGzLrrfLFcqAwmQ360PlsWiL +bg0+/rWkBRz/3wpma2RP+dGFfaj7AgMBAAGjUzBRMB0GA1UdDgQWBBQgrc2badrl +MMQwryH/BEzKQrS8czAfBgNVHSMEGDAWgBQgrc2badrlMMQwryH/BEzKQrS8czAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAE6ct4k+X4hAw+TUpN +E/rVrEybHFv6qvgyE7Ay/LhpevU+r8UBChv3XZ/u3h4WKdqFrxPN4JDOvIXN0Jq2 +Xs7Bs//hj+hULvJ3DWfQEQ6LivcxVxQsU47Sbxf6sUeCV3kXSxjFEcsvSx9kPNv6 +3Bi1EwPrMiwNdpB1BDUG7Z2mFxhoHv1SUppE7Lhu/x/1b7LgYqNy2VWWOFg/TZI2 +tdg45fMtNYp8kdQW+r18YxToQHUjXkkQqW9HSyxIqeabVqxuuptyY+OSIIFxBWaB +A4BbiHPKhJ0umCQa9mPeVKWUjUeXzRnHMXw2nPyqfK+1wQXt/7DZrBQLVe5Z9IHG +DZj/ +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go new file mode 100644 index 0000000..cce5ffb --- /dev/null +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -0,0 +1,515 @@ +package kinesisacquisition + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +type KinesisConfiguration struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + StreamName string `yaml:"stream_name"` + StreamARN string `yaml:"stream_arn"` + UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` //Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords + AwsProfile *string `yaml:"aws_profile"` + AwsRegion string `yaml:"aws_region"` + AwsEndpoint string `yaml:"aws_endpoint"` + ConsumerName string `yaml:"consumer_name"` + FromSubscription bool `yaml:"from_subscription"` + MaxRetries int `yaml:"max_retries"` +} + +type KinesisSource struct { + Config KinesisConfiguration + logger *log.Entry + kClient *kinesis.Kinesis + shardReaderTomb *tomb.Tomb +} + +type CloudWatchSubscriptionRecord struct { + MessageType string `json:"messageType"` + Owner string `json:"owner"` + LogGroup string `json:"logGroup"` + LogStream string `json:"logStream"` + SubscriptionFilters []string `json:"subscriptionFilters"` + LogEvents []CloudwatchSubscriptionLogEvent `json:"logEvents"` +} + +type CloudwatchSubscriptionLogEvent struct { + ID string 
`json:"id"` + Message string `json:"message"` + Timestamp int64 `json:"timestamp"` +} + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_kinesis_stream_hits_total", + Help: "Number of event read per stream.", + }, + []string{"stream"}, +) + +var linesReadShards = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_kinesis_shards_hits_total", + Help: "Number of event read per shards.", + }, + []string{"stream", "shard"}, +) + +func (k *KinesisSource) newClient() error { + var sess *session.Session + + if k.Config.AwsProfile != nil { + sess = session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + Profile: *k.Config.AwsProfile, + })) + } else { + sess = session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + } + + if sess == nil { + return fmt.Errorf("failed to create aws session") + } + config := aws.NewConfig() + if k.Config.AwsRegion != "" { + config = config.WithRegion(k.Config.AwsRegion) + } + if k.Config.AwsEndpoint != "" { + config = config.WithEndpoint(k.Config.AwsEndpoint) + } + k.kClient = kinesis.New(sess, config) + if k.kClient == nil { + return fmt.Errorf("failed to create kinesis client") + } + return nil +} + +func (k *KinesisSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead, linesReadShards} + +} +func (k *KinesisSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead, linesReadShards} +} + +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry) error { + config := KinesisConfiguration{} + k.logger = logger + err := yaml.UnmarshalStrict(yamlConfig, &config) + if err != nil { + return errors.Wrap(err, "Cannot parse kinesis datasource configuration") + } + if config.Mode == "" { + config.Mode = configuration.TAIL_MODE + } + k.Config = config + if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { + return fmt.Errorf("stream_name is mandatory when use_enhanced_fanout is false") + } + if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { + return fmt.Errorf("stream_arn is mandatory when use_enhanced_fanout is true") + } + if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { + return fmt.Errorf("consumer_name is mandatory when use_enhanced_fanout is true") + } + if k.Config.StreamARN != "" && k.Config.StreamName != "" { + return fmt.Errorf("stream_arn and stream_name are mutually exclusive") + } + if k.Config.MaxRetries <= 0 { + k.Config.MaxRetries = 10 + } + err = k.newClient() + if err != nil { + return errors.Wrap(err, "Cannot create kinesis client") + } + k.shardReaderTomb = &tomb.Tomb{} + return nil +} + +func (k *KinesisSource) ConfigureByDSN(string, map[string]string, *log.Entry) error { + return fmt.Errorf("kinesis datasource does not support command-line acquisition") +} + +func (k *KinesisSource) GetMode() string { + return k.Config.Mode +} + +func (k *KinesisSource) GetName() string { + return "kinesis" +} + +func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + return fmt.Errorf("kinesis datasource does not support one-shot acquisition") +} + +func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { + b := bytes.NewBuffer(record) + r, err := gzip.NewReader(b) + + if err != nil { + k.logger.Error(err) + return nil, err + } + decompressed, err := io.ReadAll(r) + if err != nil { + k.logger.Error(err) + return nil, err 
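decodeFromSubscription, whose tail follows, expects every Kinesis record to carry a gzip-compressed JSON envelope matching CloudWatchSubscriptionRecord above. A sketch of producing one, mirroring the GenSubObject helper in the tests (field values are illustrative):

var buf bytes.Buffer
gz := gzip.NewWriter(&buf)
_ = json.NewEncoder(gz).Encode(CloudWatchSubscriptionRecord{
	MessageType: "subscription",
	LogEvents: []CloudwatchSubscriptionLogEvent{
		{ID: "1", Message: "hello", Timestamp: time.Now().UTC().Unix()},
	},
})
gz.Close()
// buf.Bytes() is what arrives as record.Data when from_subscription is set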
+ } + var subscriptionRecord CloudWatchSubscriptionRecord + err = json.Unmarshal(decompressed, &subscriptionRecord) + if err != nil { + k.logger.Error(err) + return nil, err + } + return subscriptionRecord.LogEvents, nil +} + +func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, streamARN string) error { + maxTries := k.Config.MaxRetries + for i := 0; i < maxTries; i++ { + _, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ + ConsumerName: aws.String(consumerName), + StreamARN: aws.String(streamARN), + }) + if err != nil { + switch err.(type) { + case *kinesis.ResourceNotFoundException: + return nil + default: + k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) + return errors.Wrap(err, "Cannot describe stream consumer") + } + } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) + } + return fmt.Errorf("consumer %s is not deregistered after %d tries", consumerName, maxTries) +} + +func (k *KinesisSource) DeregisterConsumer() error { + k.logger.Debugf("Deregistering consumer %s if it exists", k.Config.ConsumerName) + _, err := k.kClient.DeregisterStreamConsumer(&kinesis.DeregisterStreamConsumerInput{ + ConsumerName: aws.String(k.Config.ConsumerName), + StreamARN: aws.String(k.Config.StreamARN), + }) + if err != nil { + switch err.(type) { + case *kinesis.ResourceNotFoundException: + default: + return errors.Wrap(err, "Cannot deregister stream consumer") + } + } + err = k.WaitForConsumerDeregistration(k.Config.ConsumerName, k.Config.StreamARN) + if err != nil { + return errors.Wrap(err, "Cannot wait for consumer deregistration") + } + return nil +} + +func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { + maxTries := k.Config.MaxRetries + for i := 0; i < maxTries; i++ { + describeOutput, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ + ConsumerARN: aws.String(consumerARN), + }) + if err != nil { + return errors.Wrap(err, "Cannot describe stream consumer") + } + if *describeOutput.ConsumerDescription.ConsumerStatus == "ACTIVE" { + k.logger.Debugf("Consumer %s is active", consumerARN) + return nil + } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) + k.logger.Debugf("Waiting for consumer registration %d", i) + } + return fmt.Errorf("consumer %s is not active after %d tries", consumerARN, maxTries) +} + +func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutput, error) { + k.logger.Debugf("Registering consumer %s", k.Config.ConsumerName) + streamConsumer, err := k.kClient.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ + ConsumerName: aws.String(k.Config.ConsumerName), + StreamARN: aws.String(k.Config.StreamARN), + }) + if err != nil { + return nil, errors.Wrap(err, "Cannot register stream consumer") + } + err = k.WaitForConsumerRegistration(*streamConsumer.Consumer.ConsumerARN) + if err != nil { + return nil, errors.Wrap(err, "Timeout while waiting for consumer to be active") + } + return streamConsumer, nil +} + +func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan types.Event, logger *log.Entry, shardId string) { + for _, record := range records { + if k.Config.StreamARN != "" { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamARN, "shard": shardId}).Inc() + linesRead.With(prometheus.Labels{"stream": k.Config.StreamARN}).Inc() + } else { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamName, "shard": shardId}).Inc() + 
linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() + } + var data []CloudwatchSubscriptionLogEvent + var err error + if k.Config.FromSubscription { + //The AWS docs says that the data is base64 encoded + //but apparently GetRecords decodes it for us ? + data, err = k.decodeFromSubscription(record.Data) + if err != nil { + logger.Errorf("Cannot decode data: %s", err) + continue + } + } else { + data = []CloudwatchSubscriptionLogEvent{{Message: string(record.Data)}} + } + for _, event := range data { + logger.Tracef("got record %s", event.Message) + l := types.Line{} + l.Raw = event.Message + l.Labels = k.Config.Labels + l.Time = time.Now().UTC() + l.Process = true + l.Module = k.GetName() + if k.Config.StreamARN != "" { + l.Src = k.Config.StreamARN + } else { + l.Src = k.Config.StreamName + } + var evt types.Event + if !k.Config.UseTimeMachine { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leakybucket.LIVE} + } else { + evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leakybucket.TIMEMACHINE} + } + out <- evt + } + } +} + +func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { + logger := k.logger.WithFields(log.Fields{"shard_id": shardId}) + //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately + //and we won't be able to start a new one if this is the first one started by the tomb + //TODO: look into parent shards to see if a shard is closed before starting to read it ? + time.Sleep(time.Second) + for { + select { + case <-k.shardReaderTomb.Dying(): + logger.Infof("Subscribed shard reader is dying") + err := reader.Close() + if err != nil { + return errors.Wrap(err, "Cannot close kinesis subscribed shard reader") + } + return nil + case event, ok := <-reader.Events(): + if !ok { + logger.Infof("Event chan has been closed") + return nil + } + switch event := event.(type) { + case *kinesis.SubscribeToShardEvent: + k.ParseAndPushRecords(event.Records, out, logger, shardId) + case *kinesis.SubscribeToShardEventStreamUnknownEvent: + logger.Infof("got an unknown event, what to do ?") + } + } + } +} + +func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.RegisterStreamConsumerOutput, out chan types.Event) error { + shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ + StreamName: aws.String(arn.Resource[7:]), + }) + if err != nil { + return errors.Wrap(err, "Cannot list shards for enhanced_read") + } + + for _, shard := range shards.Shards { + shardId := *shard.ShardId + r, err := k.kClient.SubscribeToShard(&kinesis.SubscribeToShardInput{ + ShardId: aws.String(shardId), + StartingPosition: &kinesis.StartingPosition{Type: aws.String(kinesis.ShardIteratorTypeLatest)}, + ConsumerARN: streamConsumer.Consumer.ConsumerARN, + }) + if err != nil { + return errors.Wrap(err, "Cannot subscribe to shard") + } + k.shardReaderTomb.Go(func() error { + return k.ReadFromSubscription(r.GetEventStream().Reader, out, shardId, arn.Resource[7:]) + }) + } + return nil +} + +func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { + parsedARN, err := arn.Parse(k.Config.StreamARN) + if err != nil { + return errors.Wrap(err, "Cannot parse stream ARN") + } + if !strings.HasPrefix(parsedARN.Resource, "stream/") { + return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) + } + + k.logger = 
k.logger.WithFields(log.Fields{"stream": parsedARN.Resource[7:]}) + k.logger.Info("starting kinesis acquisition with enhanced fan-out") + err = k.DeregisterConsumer() + if err != nil { + return errors.Wrap(err, "Cannot deregister consumer") + } + + streamConsumer, err := k.RegisterConsumer() + if err != nil { + return errors.Wrap(err, "Cannot register consumer") + } + + for { + k.shardReaderTomb = &tomb.Tomb{} + + err = k.SubscribeToShards(parsedARN, streamConsumer, out) + if err != nil { + return errors.Wrap(err, "Cannot subscribe to shards") + } + select { + case <-t.Dying(): + k.logger.Infof("Kinesis source is dying") + k.shardReaderTomb.Kill(nil) + _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + err = k.DeregisterConsumer() + if err != nil { + return errors.Wrap(err, "Cannot deregister consumer") + } + return nil + case <-k.shardReaderTomb.Dying(): + k.logger.Debugf("Kinesis subscribed shard reader is dying") + if k.shardReaderTomb.Err() != nil { + return k.shardReaderTomb.Err() + } + //All goroutines have exited without error, so a resharding event, start again + k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") + continue + } + } +} + +func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { + logger := k.logger.WithFields(log.Fields{"shard": shardId}) + logger.Debugf("Starting to read shard") + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), + StreamName: &k.Config.StreamName, + ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest)}) + if err != nil { + logger.Errorf("Cannot get shard iterator: %s", err) + return errors.Wrap(err, "Cannot get shard iterator") + } + it := sharIt.ShardIterator + //AWS recommends to wait for a second between calls to GetRecords for a given shard + ticker := time.NewTicker(time.Second) + for { + select { + case <-ticker.C: + records, err := k.kClient.GetRecords(&kinesis.GetRecordsInput{ShardIterator: it}) + it = records.NextShardIterator + if err != nil { + switch err.(type) { + case *kinesis.ProvisionedThroughputExceededException: + logger.Warn("Provisioned throughput exceeded") + //TODO: implement exponential backoff + continue + case *kinesis.ExpiredIteratorException: + logger.Warn("Expired iterator") + continue + default: + logger.Error("Cannot get records") + return errors.Wrap(err, "Cannot get records") + } + } + k.ParseAndPushRecords(records.Records, out, logger, shardId) + + if it == nil { + logger.Warnf("Shard has been closed") + return nil + } + case <-k.shardReaderTomb.Dying(): + logger.Infof("shardReaderTomb is dying, exiting ReadFromShard") + ticker.Stop() + return nil + } + } +} + +func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { + k.logger = k.logger.WithFields(log.Fields{"stream": k.Config.StreamName}) + k.logger.Info("starting kinesis acquisition from shards") + for { + shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ + StreamName: aws.String(k.Config.StreamName), + }) + if err != nil { + return errors.Wrap(err, "Cannot list shards") + } + k.shardReaderTomb = &tomb.Tomb{} + for _, shard := range shards.Shards { + shardId := *shard.ShardId + k.shardReaderTomb.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/kinesis/streaming/shard") + return k.ReadFromShard(out, shardId) + }) + } + select { + case <-t.Dying(): + k.logger.Info("kinesis source is dying") + k.shardReaderTomb.Kill(nil) + _ = 
k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + return nil + case <-k.shardReaderTomb.Dying(): + reason := k.shardReaderTomb.Err() + if reason != nil { + k.logger.Errorf("Unexpected error from shard reader : %s", reason) + return reason + } + k.logger.Infof("All shards have been closed, probably a resharding event, restarting acquisition") + continue + } + } +} + +func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/kinesis/streaming") + if k.Config.UseEnhancedFanOut { + return k.EnhancedRead(out, t) + } else { + return k.ReadFromStream(out, t) + } + }) + return nil +} + +func (k *KinesisSource) CanRun() error { + return nil +} + +func (k *KinesisSource) Dump() interface{} { + return k +} diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go new file mode 100644 index 0000000..46435ac --- /dev/null +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -0,0 +1,334 @@ +package kinesisacquisition + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "net" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" +) + +func getLocalStackEndpoint() (string, error) { + endpoint := "http://localhost:4566" + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { + v = strings.TrimPrefix(v, "http://") + _, err := net.Dial("tcp", v) + if err != nil { + return "", fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + } + } + return endpoint, nil +} + +func GenSubObject(i int) []byte { + r := CloudWatchSubscriptionRecord{ + MessageType: "subscription", + Owner: "test", + LogGroup: "test", + LogStream: "test", + SubscriptionFilters: []string{"filter1"}, + LogEvents: []CloudwatchSubscriptionLogEvent{ + { + ID: "testid", + Message: fmt.Sprintf("%d", i), + Timestamp: time.Now().UTC().Unix(), + }, + }, + } + body, err := json.Marshal(r) + if err != nil { + log.Fatal(err) + } + var b bytes.Buffer + gz := gzip.NewWriter(&b) + gz.Write(body) + gz.Close() + //AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point + //localstack does not do it, so let's just write a raw gzipped stream + return b.Bytes() +} + +func WriteToStream(streamName string, count int, shards int, sub bool) { + endpoint, err := getLocalStackEndpoint() + if err != nil { + log.Fatal(err) + } + sess := session.Must(session.NewSession()) + kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) + for i := 0; i < count; i++ { + partition := "partition" + if shards != 1 { + partition = fmt.Sprintf("partition-%d", i%shards) + } + var data []byte + if sub { + data = GenSubObject(i) + } else { + data = []byte(fmt.Sprintf("%d", i)) + } + _, err = kinesisClient.PutRecord(&kinesis.PutRecordInput{ + Data: data, + PartitionKey: aws.String(partition), + StreamName: aws.String(streamName), + }) + if err != nil { + fmt.Printf("Error writing to stream: %s\n", err) + log.Fatal(err) + } + } +} + +func TestMain(m *testing.M) { + os.Setenv("AWS_ACCESS_KEY_ID", "foobar") + os.Setenv("AWS_SECRET_ACCESS_KEY", "foobar") + + //delete_streams() 
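The commented-out helpers around this point suggest that stream provisioning happens out of band: the tests below assume the localstack endpoint already exposes stream-1-shard (one shard) and stream-2-shards (two shards). A sketch of creating them (the createStream helper is illustrative, not part of the file):

func createStream(name string, shards int64) {
	endpoint, _ := getLocalStackEndpoint()
	sess := session.Must(session.NewSession())
	client := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1"))
	if _, err := client.CreateStream(&kinesis.CreateStreamInput{
		StreamName: aws.String(name),
		ShardCount: aws.Int64(shards),
	}); err != nil {
		log.Fatal(err)
	}
}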
+ //create_streams() + code := m.Run() + //delete_streams() + os.Exit(code) +} + +func TestBadConfiguration(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + expectedErr string + }{ + { + config: `source: kinesis`, + expectedErr: "stream_name is mandatory when use_enhanced_fanout is false", + }, + { + config: ` +source: kinesis +use_enhanced_fanout: true`, + expectedErr: "stream_arn is mandatory when use_enhanced_fanout is true", + }, + { + config: ` +source: kinesis +use_enhanced_fanout: true +stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, + expectedErr: "consumer_name is mandatory when use_enhanced_fanout is true", + }, + { + config: ` +source: kinesis +stream_name: foobar +stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, + expectedErr: "stream_arn and stream_name are mutually exclusive", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "kinesis", + }) + for _, test := range tests { + f := KinesisSource{} + err := f.Configure([]byte(test.config), subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestReadFromStream(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + count int + shards int + }{ + { + config: `source: kinesis +aws_endpoint: %s +aws_region: us-east-1 +stream_name: stream-1-shard`, + count: 10, + shards: 1, + }, + } + endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { + f := KinesisSource{} + config := fmt.Sprintf(test.config, endpoint) + err := f.Configure([]byte(config), log.WithFields(log.Fields{ + "type": "kinesis", + })) + if err != nil { + t.Fatalf("Error configuring source: %s", err) + } + tomb := &tomb.Tomb{} + out := make(chan types.Event) + err = f.StreamingAcquisition(out, tomb) + if err != nil { + t.Fatalf("Error starting source: %s", err) + } + //Allow the datasource to start listening to the stream + time.Sleep(4 * time.Second) + WriteToStream(f.Config.StreamName, test.count, test.shards, false) + for i := 0; i < test.count; i++ { + e := <-out + assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + } + tomb.Kill(nil) + tomb.Wait() + } +} + +func TestReadFromMultipleShards(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + count int + shards int + }{ + { + config: `source: kinesis +aws_endpoint: %s +aws_region: us-east-1 +stream_name: stream-2-shards`, + count: 10, + shards: 2, + }, + } + endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { + f := KinesisSource{} + config := fmt.Sprintf(test.config, endpoint) + err := f.Configure([]byte(config), log.WithFields(log.Fields{ + "type": "kinesis", + })) + if err != nil { + t.Fatalf("Error configuring source: %s", err) + } + tomb := &tomb.Tomb{} + out := make(chan types.Event) + err = f.StreamingAcquisition(out, tomb) + if err != nil { + t.Fatalf("Error starting source: %s", err) + } + //Allow the datasource to start listening to the stream + time.Sleep(4 * time.Second) + WriteToStream(f.Config.StreamName, test.count, test.shards, false) + c := 0 + for i := 0; i < test.count; i++ { + <-out + c += 1 + } + assert.Equal(t, test.count, c) + tomb.Kill(nil) + tomb.Wait() + } +} + +func TestFromSubscription(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + tests := []struct { + config string + count int + shards int + }{ + { + 
config: `source: kinesis +aws_endpoint: %s +aws_region: us-east-1 +stream_name: stream-1-shard +from_subscription: true`, + count: 10, + shards: 1, + }, + } + endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { + f := KinesisSource{} + config := fmt.Sprintf(test.config, endpoint) + err := f.Configure([]byte(config), log.WithFields(log.Fields{ + "type": "kinesis", + })) + if err != nil { + t.Fatalf("Error configuring source: %s", err) + } + tomb := &tomb.Tomb{} + out := make(chan types.Event) + err = f.StreamingAcquisition(out, tomb) + if err != nil { + t.Fatalf("Error starting source: %s", err) + } + //Allow the datasource to start listening to the stream + time.Sleep(4 * time.Second) + WriteToStream(f.Config.StreamName, test.count, test.shards, true) + for i := 0; i < test.count; i++ { + e := <-out + assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + } + tomb.Kill(nil) + tomb.Wait() + } +} + +/* +func TestSubscribeToStream(t *testing.T) { + tests := []struct { + config string + count int + shards int + }{ + { + config: `source: kinesis +aws_endpoint: %s +aws_region: us-east-1 +stream_arn: arn:aws:kinesis:us-east-1:000000000000:stream/stream-1-shard +consumer_name: consumer-1 +use_enhanced_fanout: true`, + count: 10, + shards: 1, + }, + } + endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { + f := KinesisSource{} + config := fmt.Sprintf(test.config, endpoint) + err := f.Configure([]byte(config), log.WithFields(log.Fields{ + "type": "kinesis", + })) + if err != nil { + t.Fatalf("Error configuring source: %s", err) + } + tomb := &tomb.Tomb{} + out := make(chan types.Event) + err = f.StreamingAcquisition(out, tomb) + if err != nil { + t.Fatalf("Error starting source: %s", err) + } + //Allow the datasource to start listening to the stream + time.Sleep(10 * time.Second) + WriteToStream("stream-1-shard", test.count, test.shards) + for i := 0; i < test.count; i++ { + e := <-out + assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + } + } +} +*/ diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go new file mode 100644 index 0000000..3b59a80 --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -0,0 +1,255 @@ +package rfc3164 + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" +) + +type RFC3164Option func(*RFC3164) + +type RFC3164 struct { + PRI int + Timestamp time.Time + Hostname string + Tag string + Message string + PID string + // + len int + position int + buf []byte + useCurrentYear bool //If no year is specified in the timestamp, use the current year + strictHostname bool //If the hostname contains invalid characters or is not an IP, return an error +} + +const PRI_MAX_LEN = 3 + +//Order is important: format with the most information must be first because we will stop on the first match +var VALID_TIMESTAMPS = []string{ + time.RFC3339, + "Jan 02 15:04:05 2006", + "Jan _2 15:04:05 2006", + "Jan 02 15:04:05", + "Jan _2 15:04:05", +} + +func WithCurrentYear() RFC3164Option { + return func(r *RFC3164) { + r.useCurrentYear = true + } +} + +func WithStrictHostname() RFC3164Option { + return func(r *RFC3164) { + r.strictHostname = true + } +} + +func (r *RFC3164) parsePRI() error { + + pri := 0 + + if r.buf[r.position] != '<' { + return fmt.Errorf("PRI must start with '<'") + } + + r.position++ + + for r.position < r.len { + c := r.buf[r.position] + if c == '>' { + 
r.position++ + break + } + if c < '0' || c > '9' { + return fmt.Errorf("PRI must be a number") + } + pri = pri*10 + int(c-'0') + r.position++ + } + + if pri > 999 { + return fmt.Errorf("PRI must be up to 3 characters long") + } + + if r.position == r.len && r.buf[r.position-1] != '>' { + return fmt.Errorf("PRI must end with '>'") + } + + r.PRI = pri + return nil +} + +func (r *RFC3164) parseTimestamp() error { + validTs := false + for _, layout := range VALID_TIMESTAMPS { + tsLen := len(layout) + if r.position+tsLen > r.len { + continue + } + t, err := time.Parse(layout, string(r.buf[r.position:r.position+tsLen])) + if err == nil { + validTs = true + r.Timestamp = t + r.position += tsLen + break + } + } + if !validTs { + return fmt.Errorf("timestamp is not valid") + } + if r.useCurrentYear { + if r.Timestamp.Year() == 0 { + r.Timestamp = time.Date(time.Now().Year(), r.Timestamp.Month(), r.Timestamp.Day(), r.Timestamp.Hour(), r.Timestamp.Minute(), r.Timestamp.Second(), r.Timestamp.Nanosecond(), r.Timestamp.Location()) + } + } + r.position++ + return nil +} + +func (r *RFC3164) parseHostname() error { + hostname := []byte{} + for r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + r.position++ + break + } + hostname = append(hostname, c) + r.position++ + } + if r.strictHostname { + if !utils.IsValidHostnameOrIP(string(hostname)) { + return fmt.Errorf("hostname is not valid") + } + } + if len(hostname) == 0 { + return fmt.Errorf("hostname is empty") + } + r.Hostname = string(hostname) + return nil +} + +//We do not enforce tag len as quite a lot of syslog client send tags with more than 32 chars +func (r *RFC3164) parseTag() error { + tag := []byte{} + tmpPid := []byte{} + pidEnd := false + hasPid := false + for r.position < r.len { + c := r.buf[r.position] + if !utils.IsAlphaNumeric(c) { + break + } + tag = append(tag, c) + r.position++ + } + if len(tag) == 0 { + return fmt.Errorf("tag is empty") + } + r.Tag = string(tag) + + if r.position == r.len { + return nil + } + + c := r.buf[r.position] + if c == '[' { + hasPid = true + r.position++ + for r.position < r.len { + c = r.buf[r.position] + if c == ']' { + pidEnd = true + r.position++ + break + } + if c < '0' || c > '9' { + return fmt.Errorf("pid inside tag must be a number") + } + tmpPid = append(tmpPid, c) + r.position++ + } + } + + if hasPid && !pidEnd { + return fmt.Errorf("pid inside tag must be closed with ']'") + } + + if hasPid { + r.PID = string(tmpPid) + } + return nil +} + +func (r *RFC3164) parseMessage() error { + err := r.parseTag() + if err != nil { + return err + } + + if r.position == r.len { + return fmt.Errorf("message is empty") + } + + c := r.buf[r.position] + + if c == ':' { + r.position++ + } + + for { + if r.position >= r.len { + return fmt.Errorf("message is empty") + } + c := r.buf[r.position] + if c != ' ' { + break + } + r.position++ + } + + message := r.buf[r.position:r.len] + r.Message = string(message) + return nil +} + +func (r *RFC3164) Parse(message []byte) error { + r.len = len(message) + if r.len == 0 { + return fmt.Errorf("message is empty") + } + r.buf = message + + err := r.parsePRI() + if err != nil { + return err + } + + err = r.parseTimestamp() + if err != nil { + return err + } + + err = r.parseHostname() + if err != nil { + return err + } + + err = r.parseMessage() + if err != nil { + return err + } + + return nil +} + +func NewRFC3164Parser(opts ...RFC3164Option) *RFC3164 { + r := &RFC3164{} + for _, opt := range opts { + opt(r) + } + return r +} diff --git 
a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go new file mode 100644 index 0000000..04b7307 --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -0,0 +1,368 @@ +package rfc3164 + +import ( + "testing" + "time" +) + +func TestPri(t *testing.T) { + tests := []struct { + input string + expected int + expectedErr string + }{ + {"<0>", 0, ""}, + {"<19>", 19, ""}, + {"<200>", 200, ""}, + {"<4999>", 0, "PRI must be up to 3 characters long"}, + {"<123", 0, "PRI must end with '>'"}, + {"123>", 0, "PRI must start with '<'"}, + {"", 0, "PRI must be a number"}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + r := &RFC3164{} + r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parsePRI() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.PRI != test.expected { + t.Errorf("expected %d, got %d", test.expected, r.PRI) + } + } + }) + } +} + +func TestTimestamp(t *testing.T) { + tests := []struct { + input string + expected string + expectedErr string + currentYear bool + }{ + {"May 20 09:33:54", "0000-05-20T09:33:54Z", "", false}, + {"May 20 09:33:54", "2022-05-20T09:33:54Z", "", true}, + {"May 20 09:33:54 2022", "2022-05-20T09:33:54Z", "", false}, + {"May 1 09:33:54 2022", "2022-05-01T09:33:54Z", "", false}, + {"May 01 09:33:54 2021", "2021-05-01T09:33:54Z", "", true}, + {"foobar", "", "timestamp is not valid", false}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + opts := []RFC3164Option{} + if test.currentYear { + opts = append(opts, WithCurrentYear()) + } + r := NewRFC3164Parser(opts...) 
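+ // seed the parser's internal buffer directly so parseTimestamp can be exercised in isolation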
+ r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parseTimestamp() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.Timestamp.Format(time.RFC3339) != test.expected { + t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339)) + } + } + }) + } +} + +func TestHostname(t *testing.T) { + tests := []struct { + input string + expected string + expectedErr string + strictHostname bool + }{ + {"127.0.0.1", "127.0.0.1", "", false}, + {"::1", "::1", "", false}, + {"foo.-bar", "", "hostname is not valid", true}, + {"foo-.bar", "", "hostname is not valid", true}, + {"foo123.bar", "foo123.bar", "", true}, + {"a..", "", "hostname is not valid", true}, + {"foo.bar", "foo.bar", "", false}, + {"foo,bar", "foo,bar", "", false}, + {"foo,bar", "", "hostname is not valid", true}, + {"", "", "hostname is empty", false}, + {".", ".", "", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "", "hostname is not valid", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "", "hostname is not valid", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "", false}, + {"a.foo-", "", "hostname is not valid", true}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + opts := []RFC3164Option{} + if test.strictHostname { + opts = append(opts, WithStrictHostname()) + } + r := NewRFC3164Parser(opts...) 
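+ // same pattern: bypass Parse and drive parseHostname directly against the raw input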
+ r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parseHostname() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.Hostname != test.expected { + t.Errorf("expected %s, got %s", test.expected, r.Hostname) + } + } + }) + } +} + +func TestTag(t *testing.T) { + tests := []struct { + input string + expected string + expectedPID string + expectedErr string + }{ + {"foobar", "foobar", "", ""}, + {"foobar[42]", "foobar", "42", ""}, + {"", "", "", "tag is empty"}, + {"foobar[", "", "", "pid inside tag must be closed with ']'"}, + {"foobar[42", "", "", "pid inside tag must be closed with ']'"}, + {"foobar[asd]", "foobar", "", "pid inside tag must be a number"}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + r := &RFC3164{} + r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parseTag() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else { + if r.Tag != test.expected { + t.Errorf("expected %s, got %s", test.expected, r.Tag) + } + if r.PID != test.expectedPID { + t.Errorf("expected pid %s, got %s", test.expectedPID, r.PID) + } + } + } + }) + } +} + +func TestMessage(t *testing.T) { + tests := []struct { + input string + expected string + expectedErr string + }{ + {"foobar: pouet", "pouet", ""}, + {"foobar[42]: test", "test", ""}, + {"foobar[123]: this is a test", "this is a test", ""}, + {"foobar[123]: ", "", "message is empty"}, + {"foobar[123]:", "", "message is empty"}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + r := &RFC3164{} + r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parseMessage() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.Message != test.expected { + t.Errorf("expected message %s, got %s", test.expected, r.Message) + } + } + }) + } +} + +func TestParse(t *testing.T) { + type expected struct { + Timestamp time.Time + Hostname string + Tag string + PID string + Message string + PRI int + } + tests := []struct { + input string + expected expected + expectedErr string + opts []RFC3164Option + }{ + { + "<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo", expected{ + Timestamp: time.Date(0, time.May, 20, 9, 33, 54, 0, time.UTC), + Hostname: "UDMPRO,a2edd0c6ae48,udm-1.10.0.3686", + Tag: "kernel", + PID: "", + Message: "foo", + PRI: 12, + }, "", []RFC3164Option{}, + }, + { + "<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo", expected{ + Timestamp: time.Date(2022, time.May, 20, 9, 33, 54, 0, time.UTC), + Hostname: "UDMPRO,a2edd0c6ae48,udm-1.10.0.3686", + Tag: "kernel", + PID: "", + Message: "foo", + PRI: 12, + }, "", []RFC3164Option{WithCurrentYear()}, + }, + { + "<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 
kernel: foo", expected{}, "hostname is not valid", []RFC3164Option{WithStrictHostname()}, + }, + { + "foobar", expected{}, "PRI must start with '<'", []RFC3164Option{}, + }, + { + "<12>", expected{}, "timestamp is not valid", []RFC3164Option{}, + }, + { + "<12 May 02 09:33:54 foo.bar", expected{}, "PRI must be a number", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54", expected{}, "hostname is empty", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar", expected{}, "tag is empty", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar bla[42", expected{}, "pid inside tag must be closed with ']'", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar bla[42]", expected{}, "message is empty", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar bla[42]: ", expected{}, "message is empty", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar bla", expected{}, "message is empty", []RFC3164Option{}, + }, + { + "<12>May 02 09:33:54 foo.bar bla:", expected{}, "message is empty", []RFC3164Option{}, + }, + { + "", expected{}, "message is empty", []RFC3164Option{}, + }, + { + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{}, "timestamp is not valid", []RFC3164Option{}, + }, + { + `<46>Jun 2 06:55:39 localhost haproxy[27213]: Connect from 100.100.100.99:52611 to 100.100.100.99:443 (https_shared-merged/HTTP)\\n 10.0.0.1}`, expected{ + Timestamp: time.Date(time.Now().Year(), time.June, 2, 6, 55, 39, 0, time.UTC), + Hostname: "localhost", + Tag: "haproxy", + PID: "27213", + Message: `Connect from 100.100.100.99:52611 to 100.100.100.99:443 (https_shared-merged/HTTP)\\n 10.0.0.1}`, + PRI: 46, + }, "", []RFC3164Option{WithCurrentYear()}, + }, + { + `<46>Jun 2 06:55:39 2022 localhost haproxy[27213]: Connect from 100.100.100.99:52611 to 100.100.100.99:443 (https_shared-merged/HTTP)\\n 10.0.0.1}`, expected{ + Timestamp: time.Date(2022, time.June, 2, 6, 55, 39, 0, time.UTC), + Hostname: "localhost", + Tag: "haproxy", + PID: "27213", + Message: `Connect from 100.100.100.99:52611 to 100.100.100.99:443 (https_shared-merged/HTTP)\\n 10.0.0.1}`, + PRI: 46, + }, "", []RFC3164Option{}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + r := NewRFC3164Parser(test.opts...) 
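+ // Parse runs the full RFC3164 pipeline: PRI, timestamp, hostname, tag/PID, then message. + // A minimal standalone sketch (illustrative only, not part of the original tests): + // p := NewRFC3164Parser(WithCurrentYear()) + // if err := p.Parse([]byte("<13>May 18 12:37:56 mantis sshd[49340]: hello")); err == nil { + //     fmt.Println(p.Hostname, p.Tag, p.PID, p.Message) // mantis sshd 49340 hello + // }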
+ err := r.Parse([]byte(test.input)) + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error '%s', got '%s'", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: '%s'", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error '%s', got no error", test.expectedErr) + } else { + if r.Timestamp != test.expected.Timestamp { + t.Errorf("expected timestamp '%s', got '%s'", test.expected.Timestamp, r.Timestamp) + } + if r.Hostname != test.expected.Hostname { + t.Errorf("expected hostname '%s', got '%s'", test.expected.Hostname, r.Hostname) + } + if r.Tag != test.expected.Tag { + t.Errorf("expected tag '%s', got '%s'", test.expected.Tag, r.Tag) + } + if r.PID != test.expected.PID { + t.Errorf("expected pid '%s', got '%s'", test.expected.PID, r.PID) + } + if r.Message != test.expected.Message { + t.Errorf("expected message '%s', got '%s'", test.expected.Message, r.Message) + } + if r.PRI != test.expected.PRI { + t.Errorf("expected pri '%d', got '%d'", test.expected.PRI, r.PRI) + } + } + } + }) + } +} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go new file mode 100644 index 0000000..42073ca --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go @@ -0,0 +1,63 @@ +package rfc3164 + +import "testing" + +var e error + +func BenchmarkParse(b *testing.B) { + tests := []struct { + input []byte + opts []RFC3164Option + }{ + { + []byte("<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: [1670546.400229] IN=eth9 OUT= MAC=24:5a:4c:7b:0a:4c:34:27:92:67:0f:2b:08:00 SRC=79.124.62.34 DST=x.x.x.x LEN=44 TOS=0x00 PREC=0x00 TTL=243 ID=37520 PROTO=TCP SPT=55055 DPT=51443 WINDOW=1024 RES=0x00 SYN URGP=0"), []RFC3164Option{}, + }, + { + []byte("<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo"), []RFC3164Option{WithCurrentYear()}, + }, + { + []byte("<12>May 20 09:33:54 UDMPRO,a2edd0c6ae48,udm-1.10.0.3686 kernel: foo"), []RFC3164Option{WithStrictHostname()}, + }, + { + []byte("foobar"), []RFC3164Option{}, + }, + { + []byte("<12>"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar bla[42"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar bla[42]"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar bla[42]: "), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar bla"), []RFC3164Option{}, + }, + { + []byte("<12>May 02 09:33:54 foo.bar bla:"), []RFC3164Option{}, + }, + { + []byte(""), []RFC3164Option{}, + }, + } + var err error + for _, test := range tests { + test := test + b.Run(string(test.input), func(b *testing.B) { + for i := 0; i < b.N; i++ { + r := NewRFC3164Parser(test.opts...) 
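+ // a fresh parser is allocated on every iteration so buffer/position state cannot leak between runs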
+ err = r.Parse(test.input) + } + }) + } + e = err +} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go new file mode 100644 index 0000000..8b71a77 --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -0,0 +1,398 @@ +package rfc5424 + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" +) + +type RFC5424Option func(*RFC5424) + +type RFC5424 struct { + PRI int + Timestamp time.Time + Hostname string + Tag string + Message string + PID string + MsgID string + // + len int + position int + buf []byte + useCurrentYear bool //If no year is specified in the timestamp, use the current year + strictHostname bool //If the hostname contains invalid characters or is not an IP, return an error +} + +const PRI_MAX_LEN = 3 + +const NIL_VALUE = '-' + +var VALID_TIMESTAMPS = []string{ + time.RFC3339, +} + +const VALID_TIMESTAMP = time.RFC3339Nano + +func WithCurrentYear() RFC5424Option { + return func(r *RFC5424) { + r.useCurrentYear = true + } +} + +func WithStrictHostname() RFC5424Option { + return func(r *RFC5424) { + r.strictHostname = true + } +} + +func (r *RFC5424) parsePRI() error { + + pri := 0 + + if r.buf[r.position] != '<' { + return fmt.Errorf("PRI must start with '<'") + } + + r.position++ + + for r.position < r.len { + c := r.buf[r.position] + if c == '>' { + r.position++ + break + } + if c < '0' || c > '9' { + return fmt.Errorf("PRI must be a number") + } + pri = pri*10 + int(c-'0') + r.position++ + } + + if pri > 999 { + return fmt.Errorf("PRI must be up to 3 characters long") + } + + if r.position == r.len && r.buf[r.position-1] != '>' { + return fmt.Errorf("PRI must end with '>'") + } + + r.PRI = pri + return nil +} + +func (r *RFC5424) parseVersion() error { + if r.buf[r.position] != '1' { + return fmt.Errorf("version must be 1") + } + r.position += 2 + if r.position >= r.len { + return fmt.Errorf("version must be followed by a space") + } + return nil +} + +func (r *RFC5424) parseTimestamp() error { + + timestamp := []byte{} + + if r.buf[r.position] == NIL_VALUE { + r.Timestamp = time.Now().UTC().Round(0) + r.position += 2 + return nil + } + + for r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + break + } + timestamp = append(timestamp, c) + r.position++ + } + + if len(timestamp) == 0 { + return fmt.Errorf("timestamp is empty") + } + + if r.position == r.len { + return fmt.Errorf("EOL after timestamp") + } + + date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) + + if err != nil { + return fmt.Errorf("timestamp is not valid") + } + + r.Timestamp = date + + r.position++ + + if r.position >= r.len { + return fmt.Errorf("EOL after timestamp") + } + + return nil +} + +func (r *RFC5424) parseHostname() error { + if r.buf[r.position] == NIL_VALUE { + r.Hostname = "" + r.position += 2 + return nil + } + + hostname := []byte{} + for r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + r.position++ + break + } + hostname = append(hostname, c) + r.position++ + } + if r.strictHostname { + if !utils.IsValidHostnameOrIP(string(hostname)) { + return fmt.Errorf("hostname is not valid") + } + } + if len(hostname) == 0 { + return fmt.Errorf("hostname is empty") + } + r.Hostname = string(hostname) + return nil +} + +func (r *RFC5424) parseAppName() error { + if r.buf[r.position] == NIL_VALUE { + r.Tag = "" + r.position += 2 + return nil + } + + appname := []byte{} + for 
r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + r.position++ + break + } + appname = append(appname, c) + r.position++ + } + + if len(appname) == 0 { + return fmt.Errorf("appname is empty") + } + + if len(appname) > 48 { + return fmt.Errorf("appname is too long") + } + + r.Tag = string(appname) + return nil +} + +func (r *RFC5424) parseProcID() error { + if r.buf[r.position] == NIL_VALUE { + r.PID = "" + r.position += 2 + return nil + } + + procid := []byte{} + for r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + r.position++ + break + } + procid = append(procid, c) + r.position++ + } + + if len(procid) == 0 { + return fmt.Errorf("procid is empty") + } + + if len(procid) > 128 { + return fmt.Errorf("procid is too long") + } + + r.PID = string(procid) + return nil +} + +func (r *RFC5424) parseMsgID() error { + if r.buf[r.position] == NIL_VALUE { + r.MsgID = "" + r.position += 2 + return nil + } + + msgid := []byte{} + for r.position < r.len { + c := r.buf[r.position] + if c == ' ' { + r.position++ + break + } + msgid = append(msgid, c) + r.position++ + } + + if len(msgid) == 0 { + return fmt.Errorf("msgid is empty") + } + + if len(msgid) > 32 { + return fmt.Errorf("msgid is too long") + } + + r.MsgID = string(msgid) + return nil +} + +func (r *RFC5424) parseStructuredData() error { + done := false + if r.buf[r.position] == NIL_VALUE { + r.position += 2 + return nil + } + if r.buf[r.position] != '[' { + return fmt.Errorf("structured data must start with '[' or be '-'") + } + prev := byte(0) + for r.position < r.len { + done = false + c := r.buf[r.position] + if c == ']' && prev != '\\' { + done = true + r.position++ + if r.position < r.len && r.buf[r.position] == ' ' { + break + } + } + prev = c + r.position++ + } + r.position++ + if !done { + return fmt.Errorf("structured data must end with ']'") + } + return nil +} + +func (r *RFC5424) parseMessage() error { + if r.position == r.len { + return fmt.Errorf("message is empty") + } + + message := []byte{} + + for r.position < r.len { + c := r.buf[r.position] + message = append(message, c) + r.position++ + } + r.Message = string(message) + return nil +} + +func (r *RFC5424) Parse(message []byte) error { + r.len = len(message) + if r.len == 0 { + return fmt.Errorf("syslog line is empty") + } + r.buf = message + + err := r.parsePRI() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after PRI") + } + + err = r.parseVersion() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after Version") + } + + err = r.parseTimestamp() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after Timestamp") + } + + err = r.parseHostname() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after hostname") + } + + err = r.parseAppName() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after appname") + } + + err = r.parseProcID() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after ProcID") + } + + err = r.parseMsgID() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after MSGID") + } + + err = r.parseStructuredData() + if err != nil { + return err + } + + if r.position >= r.len { + return fmt.Errorf("EOL after SD") + } + + err = r.parseMessage() + if err != nil { + return err + } + + return nil +} + +func NewRFC5424Parser(opts ...RFC5424Option) *RFC5424 { + r := 
&RFC5424{} + for _, opt := range opts { + opt(r) + } + return r +} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go new file mode 100644 index 0000000..af123ad --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -0,0 +1,268 @@ +package rfc5424 + +import ( + "testing" + "time" +) + +func TestPri(t *testing.T) { + tests := []struct { + input string + expected int + expectedErr string + }{ + {"<0>", 0, ""}, + {"<19>", 19, ""}, + {"<200>", 200, ""}, + {"<4999>", 0, "PRI must be up to 3 characters long"}, + {"<123", 0, "PRI must end with '>'"}, + {"123>", 0, "PRI must start with '<'"}, + {"", 0, "PRI must be a number"}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + r := &RFC5424{} + r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parsePRI() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.PRI != test.expected { + t.Errorf("expected %d, got %d", test.expected, r.PRI) + } + } + }) + } +} + +func TestHostname(t *testing.T) { + tests := []struct { + input string + expected string + expectedErr string + strictHostname bool + }{ + {"127.0.0.1", "127.0.0.1", "", false}, + {"::1", "::1", "", false}, + {"-", "", "", false}, + {"foo.-bar", "", "hostname is not valid", true}, + {"foo-.bar", "", "hostname is not valid", true}, + {"foo123.bar", "foo123.bar", "", true}, + {"a..", "", "hostname is not valid", true}, + {"foo.bar", "foo.bar", "", false}, + {"foo,bar", "foo,bar", "", false}, + {"foo,bar", "", "hostname is not valid", true}, + {".", ".", "", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "", "hostname is not valid", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "", "hostname is not valid", true}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.bla", "", false}, + {"a.foo-", "", "hostname is not valid", true}, + } + + for _, test := range tests { + test := test + t.Run(test.input, func(t *testing.T) { + opts := []RFC5424Option{} + if test.strictHostname { + opts = append(opts, WithStrictHostname()) + } + r := NewRFC5424Parser(opts...) 
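+ // drive parseHostname directly; note that '-' is the RFC5424 nil value and yields an empty hostname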
+ r.buf = []byte(test.input) + r.len = len(r.buf) + err := r.parseHostname() + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: %s", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error %s, got no error", test.expectedErr) + } else if r.Hostname != test.expected { + t.Errorf("expected %s, got %s", test.expected, r.Hostname) + } + } + }) + } +} + +func TestParse(t *testing.T) { + type expected struct { + Timestamp time.Time + Hostname string + Tag string + PID string + Message string + PRI int + MsgID string + } + + tests := []struct { + name string + input string + expected expected + expectedErr string + opts []RFC5424Option + }{ + { + "valid msg", + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), + Hostname: "mantis", + Tag: "sshd", + PID: "49340", + MsgID: "", + Message: "blabla", + PRI: 13, + }, "", []RFC5424Option{}, + }, + { + "valid msg with msgid", + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), + Hostname: "mantis", + Tag: "foobar", + PID: "49340", + MsgID: "123123", + Message: "blabla", + PRI: 13, + }, "", []RFC5424Option{}, + }, + { + "valid msg with repeating SD", + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, expected{ + Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), + Hostname: "mantis", + Tag: "foobar", + PID: "49340", + MsgID: "123123", + Message: "blabla", + PRI: 13, + }, "", []RFC5424Option{}, + }, + { + "invalid SD", + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, expected{}, "structured data must end with ']'", []RFC5424Option{}, + }, + { + "invalid version", + `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{}, "version must be 1", []RFC5424Option{}, + }, + { + "invalid message", + `<13>1`, expected{}, "version must be followed by a space", []RFC5424Option{}, + }, + { + "valid msg with empty fields", + `<13>1 - foo - - - - blabla`, expected{ + Timestamp: time.Now().UTC().Round(0), + Hostname: "foo", + PRI: 13, + Message: "blabla", + }, "", []RFC5424Option{}, + }, + { + "valid msg with empty fields", + `<13>1 - - - - - - blabla`, expected{ + Timestamp: time.Now().UTC().Round(0), + PRI: 13, + Message: "blabla", + }, "", []RFC5424Option{}, + }, + { + "valid msg with escaped SD", + `<13>1 2022-05-24T10:57:39Z testhostname unknown - sn="msgid" [foo="\]" bar="a\""][a b="[\]" c] testmessage`, + expected{ + PRI: 13, + Timestamp: time.Date(2022, 5, 24, 10, 57, 39, 0, time.UTC), + Tag: "unknown", + Hostname: "testhostname", + MsgID: `sn="msgid"`, + Message: `testmessage`, + }, "", []RFC5424Option{}, + }, + { + "valid complex msg", + `<13>1 2022-05-24T10:57:39Z myhostname unknown - sn="msgid" [all@0 request="/dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js" src_ip_geo_country="DE" MONTH="May" COMMONAPACHELOG="1.1.1.1 - - [24/May/2022:10:57:37 +0200\] \"GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0\" 304 0" auth="-" 
HOUR="10" gl2_remote_ip="172.31.32.142" ident="-" gl2_remote_port="43375" BASE10NUM="[2.0, 304, 0\]" pid="-1" program="nginx" gl2_source_input="623ed3440183476d61cff974" INT="+0200" is_private_ip="false" YEAR="2022" src_ip_geo_city="Achern" clientip="1.1.1.1" USERNAME="-" src_ip_geo_location="48.6306,8.0743" gl2_source_node="8620c2bb-dbb7-4535-b1ce-83df223acd8d" MINUTE="57" timestamp="2022-05-24T08:57:37.000Z" src_ip_asn="3320" level="5" IP="1.1.1.1" IPV4="1.1.1.1" verb="GET" gl2_message_id="01G3TMJFAMFS4H60QSF7M029R0" TIME="10:57:37" USER="-" src_ip_asn_owner="Deutsche Telekom AG" response="304" bytes="0" SECOND="37" httpversion="2.0" _id="906ce155-db3f-11ec-b25f-0a189ba2c64e" facility="user" MONTHDAY="24"] source: sn="www.foobar.com" | message: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 "https://www.foobar.com/sw.js" "Mozilla/5.0 (Linux; Android 9; ANE-LX1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.61 Mobile Safari/537.36" "-" "www.foobar.com" sn="www.foobar.com" rt=0.000 ua="-" us="-" ut="-" ul="-" cs=HIT { request: /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js | src_ip_geo_country: DE | MONTH: May | COMMONAPACHELOG: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 | auth: - | HOUR: 10 | gl2_remote_ip: 172.31.32.142 | ident: - | gl2_remote_port: 43375 | BASE10NUM: [2.0, 304, 0] | pid: -1 | program: nginx | gl2_source_input: 623ed3440183476d61cff974 | INT: +0200 | is_private_ip: false | YEAR: 2022 | src_ip_geo_city: Achern | clientip: 1.1.1.1 | USERNAME:`, + expected{ + Timestamp: time.Date(2022, 5, 24, 10, 57, 39, 0, time.UTC), + Hostname: "myhostname", + Tag: "unknown", + PRI: 13, + MsgID: `sn="msgid"`, + Message: `source: sn="www.foobar.com" | message: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 "https://www.foobar.com/sw.js" "Mozilla/5.0 (Linux; Android 9; ANE-LX1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.61 Mobile Safari/537.36" "-" "www.foobar.com" sn="www.foobar.com" rt=0.000 ua="-" us="-" ut="-" ul="-" cs=HIT { request: /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js | src_ip_geo_country: DE | MONTH: May | COMMONAPACHELOG: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 | auth: - | HOUR: 10 | gl2_remote_ip: 172.31.32.142 | ident: - | gl2_remote_port: 43375 | BASE10NUM: [2.0, 304, 0] | pid: -1 | program: nginx | gl2_source_input: 623ed3440183476d61cff974 | INT: +0200 | is_private_ip: false | YEAR: 2022 | src_ip_geo_city: Achern | clientip: 1.1.1.1 | USERNAME:`, + }, "", []RFC5424Option{}, + }, + { + "partial message", + `<13>1 2022-05-24T10:57:39Z foo bar -`, + expected{}, + "EOL after ProcID", + []RFC5424Option{}, + }, + { + "partial message", + `<13>1 2022-05-24T10:57:39Z foo bar `, + expected{}, + "EOL after appname", + []RFC5424Option{}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + r := NewRFC5424Parser(test.opts...) 
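+ // Parse runs the full RFC5424 pipeline: PRI, version, timestamp, hostname, app-name, procid, msgid, structured data, message. + // A minimal standalone sketch (illustrative only): + // p := NewRFC5424Parser() + // if err := p.Parse([]byte("<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - - blabla")); err == nil { + //     fmt.Println(p.Hostname, p.Tag, p.PID, p.Message) // mantis sshd 49340 blabla + // }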
+ err := r.Parse([]byte(test.input)) + if err != nil { + if test.expectedErr != "" { + if err.Error() != test.expectedErr { + t.Errorf("expected error '%s', got '%s'", test.expectedErr, err) + } + } else { + t.Errorf("unexpected error: '%s'", err) + } + } else { + if test.expectedErr != "" { + t.Errorf("expected error '%s', got no error", test.expectedErr) + } else { + if r.Timestamp.Round(time.Second).String() != test.expected.Timestamp.Round(time.Second).String() { + t.Errorf("expected timestamp '%s', got '%s'", test.expected.Timestamp, r.Timestamp) + } + if r.Hostname != test.expected.Hostname { + t.Errorf("expected hostname '%s', got '%s'", test.expected.Hostname, r.Hostname) + } + if r.Tag != test.expected.Tag { + t.Errorf("expected tag '%s', got '%s'", test.expected.Tag, r.Tag) + } + if r.PID != test.expected.PID { + t.Errorf("expected pid '%s', got '%s'", test.expected.PID, r.PID) + } + if r.Message != test.expected.Message { + t.Errorf("expected message '%s', got '%s'", test.expected.Message, r.Message) + } + if r.PRI != test.expected.PRI { + t.Errorf("expected pri '%d', got '%d'", test.expected.PRI, r.PRI) + } + if r.MsgID != test.expected.MsgID { + t.Errorf("expected msgid '%s', got '%s'", test.expected.MsgID, r.MsgID) + } + } + } + }) + } +} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go new file mode 100644 index 0000000..318571e --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go @@ -0,0 +1,104 @@ +package rfc5424 + +import "testing" + +var e error + +const BOM = "\xEF\xBB\xBF" + +//Test cases are from https://github.com/influxdata/go-syslog (the parser we used previously) + +func BenchmarkParse(b *testing.B) { + tests := []struct { + label string + input []byte + }{ + { + label: "[no] empty input", + input: []byte(``), + }, + { + label: "[no] multiple syslog messages on multiple lines", + input: []byte("<1>1 - - - - - -\x0A<2>1 - - - - - -"), + }, + { + label: "[no] impossible timestamp", + input: []byte(`<101>11 2003-09-31T22:14:15.003Z`), + }, + { + label: "[no] malformed structured data", + input: []byte("<1>1 - - - - - X"), + }, + { + label: "[no] with duplicated structured data id", + input: []byte("<165>3 2003-10-11T22:14:15.003Z example.com evnts - ID27 [id1][id1]"), + }, + { + label: "[ok] minimal", + input: []byte(`<1>1 - - - - - -`), + }, + { + label: "[ok] average message", + input: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice - - [origin x-service="someservice"][meta sequenceId="14125553"] 127.0.0.1 - - 1456029177 "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), + }, + { + label: "[ok] complicated message", + input: []byte(`<78>1 2016-01-15T00:04:01Z host1 CROND 10391 - [meta sequenceId="29" sequenceBlah="foo"][my key="value"] some_message`), + }, + { + label: "[ok] very long message", + input: []byte(`<190>1 2016-02-21T01:19:11+00:00 batch6sj - - - [meta sequenceId="21881798" x-group="37051387"][origin x-service="tracking"] metascutellar conversationalist nephralgic exogenetic graphy streng outtaken acouasm amateurism prenotice Lyonese bedull antigrammatical diosphenol gastriloquial bayoneteer sweetener naggy roughhouser dighter addend sulphacid uneffectless ferroprussiate reveal Mazdaist plaudite Australasian distributival wiseman rumness Seidel topazine shahdom sinsion mesmerically pinguedinous ophthalmotonometer scuppler wound eciliate expectedly carriwitchet dictatorialism bindweb 
pyelitic idic atule kokoon poultryproof rusticial seedlip nitrosate splenadenoma holobenthic uneternal Phocaean epigenic doubtlessly indirection torticollar robomb adoptedly outspeak wappenschawing talalgia Goop domitic savola unstrafed carded unmagnified mythologically orchester obliteration imperialine undisobeyed galvanoplastical cycloplegia quinquennia foremean umbonal marcgraviaceous happenstance theoretical necropoles wayworn Igbira pseudoangelic raising unfrounced lamasary centaurial Japanolatry microlepidoptera`), + }, + { + label: "[ok] all max length and complete", + input: []byte(`<191>999 2018-12-31T23:59:59.999999-23:59 abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabc abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdef abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzab abcdefghilmnopqrstuvzabcdefghilm [an@id key1="val1" key2="val2"][another@id key1="val1"] Some message "GET"`), + }, + { + label: "[ok] all max length except structured data and message", + input: []byte(`<191>999 2018-12-31T23:59:59.999999-23:59 abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabc abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdef abcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzabcdefghilmnopqrstuvzab abcdefghilmnopqrstuvzabcdefghilm -`), + }, + { + label: "[ok] minimal with message containing newline", + input: []byte("<1>1 - - - - - - x\x0Ay"), + }, + { + label: "[ok] w/o procid, w/o structured data, with message starting with BOM", + input: []byte("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - " + BOM + "'su root' failed for lonvick on /dev/pts/8"), + }, + { + label: "[ok] minimal with UTF-8 message", + input: []byte("<0>1 - - - - - - ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑"), + }, + { + label: "[ok] minimal with UTF-8 message starting with BOM", + input: []byte("<0>1 - - - - - - " + BOM + "⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑"), + }, + { + label: "[ok] with structured data id, w/o structured data params", + input: []byte(`<29>50 2016-01-15T01:00:43Z hn S - - [my@id]`), + }, + { + label: "[ok] with multiple structured data", + input: []byte(`<29>50 2016-01-15T01:00:43Z hn S - - [my@id1 k="v"][my@id2 c="val"]`), + }, + { + label: "[ok] with escaped backslash within structured data param value, with message", + input: []byte(`<29>50 2016-01-15T01:00:43Z hn S - - [meta es="\\valid"] 1452819643`), + }, + { + label: "[ok] with UTF-8 structured data param value, with message", + input: []byte(`<78>1 2016-01-15T00:04:01+00:00 host1 CROND 10391 - [sdid x="⌘"] some_message`), + }, + } + var err error + for _, test := range tests { + test := test + b.Run(test.label, func(b *testing.B) { + for i := 0; i < b.N; i++ { + r := NewRFC5424Parser() + err = r.Parse(test.input) + } + }) + } + e = err +} diff --git a/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go new file mode 100644 index 0000000..8fe717a --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go 
@@ -0,0 +1,76 @@ +package utils + +import "net" + +func isValidIP(ip string) bool { + return net.ParseIP(ip) != nil +} + +func IsAlphaNumeric(c byte) bool { + return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' +} + +//This function is lifted from go source +//See https://github.com/golang/go/blob/master/src/net/dnsclient.go#L75 +func isValidHostname(s string) bool { + // The root domain name is valid. See golang.org/issue/45715. + if s == "." { + return true + } + + // See RFC 1035, RFC 3696. + // Presentation format has dots before every label except the first, and the + // terminal empty label is optional here because we assume fully-qualified + // (absolute) input. We must therefore reserve space for the first and last + // labels' length octets in wire format, where they are necessary and the + // maximum total length is 255. + // So our _effective_ maximum is 253, but 254 is not rejected if the last + // character is a dot. + l := len(s) + if l == 0 || l > 254 || l == 254 && s[l-1] != '.' { + return false + } + + last := byte('.') + nonNumeric := false // true once we've seen a letter or hyphen + partlen := 0 + for i := 0; i < len(s); i++ { + c := s[i] + switch { + default: + return false + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_': + nonNumeric = true + partlen++ + case '0' <= c && c <= '9': + // fine + partlen++ + case c == '-': + // Byte before dash cannot be dot. + if last == '.' { + return false + } + partlen++ + nonNumeric = true + case c == '.': + // Byte before dot cannot be dot, dash. + if last == '.' || last == '-' { + return false + } + if partlen > 63 || partlen == 0 { + return false + } + partlen = 0 + } + last = c + } + if last == '-' || partlen > 63 { + return false + } + + return nonNumeric +} + +func IsValidHostnameOrIP(hostname string) bool { + return isValidIP(hostname) || isValidHostname(hostname) +} diff --git a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go new file mode 100644 index 0000000..088ab0d --- /dev/null +++ b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go @@ -0,0 +1,94 @@ +package syslogserver + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" +) + +type SyslogServer struct { + listenAddr string + port int + channel chan SyslogMessage + udpConn *net.UDPConn + Logger *log.Entry + MaxMessageLen int +} + +type SyslogMessage struct { + Message []byte + Client string +} + +func (s *SyslogServer) Listen(listenAddr string, port int) error { + + s.listenAddr = listenAddr + s.port = port + udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", s.listenAddr, s.port)) + if err != nil { + return errors.Wrapf(err, "could not resolve addr %s", s.listenAddr) + } + udpConn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return errors.Wrapf(err, "could not listen on port %d", s.port) + } + s.Logger.Debugf("listening on %s:%d", s.listenAddr, s.port) + s.udpConn = udpConn + + err = s.udpConn.SetReadDeadline(time.Now().UTC().Add(100 * time.Millisecond)) + if err != nil { + return errors.Wrap(err, "could not set read deadline on UDP socket") + } + return nil +} + +func (s *SyslogServer) SetChannel(c chan SyslogMessage) { + s.channel = c +} + +func (s *SyslogServer) StartServer() *tomb.Tomb { + t := tomb.Tomb{} + + t.Go(func() error { + for { + select { + case <-t.Dying(): + s.Logger.Info("Syslog server tomb is dying") + err := 
s.KillServer() + return err + default: + //RFC3164 says 1024 bytes max + //RFC5424 says 480 bytes minimum, and should support up to 2048 bytes + b := make([]byte, s.MaxMessageLen) + n, addr, err := s.udpConn.ReadFrom(b) + if err != nil && !strings.Contains(err.Error(), "i/o timeout") { + s.Logger.Errorf("error while reading from socket : %s", err) + s.udpConn.Close() + return err + } + if err == nil { + s.channel <- SyslogMessage{Message: b[:n], Client: strings.Split(addr.String(), ":")[0]} + } + err = s.udpConn.SetReadDeadline(time.Now().UTC().Add(100 * time.Millisecond)) + if err != nil { + return err + } + } + } + }) + return &t +} + +func (s *SyslogServer) KillServer() error { + err := s.udpConn.Close() + if err != nil { + return errors.Wrap(err, "could not close UDP connection") + } + close(s.channel) + return nil +} diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go new file mode 100644 index 0000000..2cd0083 --- /dev/null +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -0,0 +1,218 @@ +package syslogacquisition + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/rfc3164" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/rfc5424" + syslogserver "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/server" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +type SyslogConfiguration struct { + Proto string `yaml:"protocol,omitempty"` + Port int `yaml:"listen_port,omitempty"` + Addr string `yaml:"listen_addr,omitempty"` + MaxMessageLen int `yaml:"max_message_len,omitempty"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type SyslogSource struct { + config SyslogConfiguration + logger *log.Entry + server *syslogserver.SyslogServer + serverTomb *tomb.Tomb +} + +var linesReceived = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_syslogsource_hits_total", + Help: "Total lines that were received.", + }, + []string{"source"}) + +var linesParsed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_syslogsource_parsed_total", + Help: "Total lines that were successfully parsed", + }, + []string{"source", "type"}) + +func (s *SyslogSource) GetName() string { + return "syslog" +} + +func (s *SyslogSource) GetMode() string { + return s.config.Mode +} + +func (s *SyslogSource) Dump() interface{} { + return s +} + +func (s *SyslogSource) CanRun() error { + return nil +} + +func (s *SyslogSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesReceived, linesParsed} +} + +func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesReceived, linesParsed} +} + +func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + return fmt.Errorf("syslog datasource does not support one shot acquisition") +} + +func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + return fmt.Errorf("syslog datasource does not support one shot acquisition") +} + +func validatePort(port int) bool { + return port > 0 && port <= 65535 +} + +func validateAddr(addr string) bool { + 
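+ // net.ParseIP accepts both IPv4 and IPv6 literals; anything else (e.g. a hostname) is rejected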
return net.ParseIP(addr) != nil +} + +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry) error { + s.logger = logger + s.logger.Infof("Starting syslog datasource configuration") + syslogConfig := SyslogConfiguration{} + syslogConfig.Mode = configuration.TAIL_MODE + err := yaml.UnmarshalStrict(yamlConfig, &syslogConfig) + if err != nil { + return errors.Wrap(err, "Cannot parse syslog configuration") + } + if syslogConfig.Addr == "" { + syslogConfig.Addr = "127.0.0.1" //Do we want a usable or a secure default? + } + if syslogConfig.Port == 0 { + syslogConfig.Port = 514 + } + if syslogConfig.MaxMessageLen == 0 { + syslogConfig.MaxMessageLen = 2048 + } + if !validatePort(syslogConfig.Port) { + return fmt.Errorf("invalid port %d", syslogConfig.Port) + } + if !validateAddr(syslogConfig.Addr) { + return fmt.Errorf("invalid listen IP %s", syslogConfig.Addr) + } + s.config = syslogConfig + return nil +} + +func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + c := make(chan syslogserver.SyslogMessage) + s.server = &syslogserver.SyslogServer{Logger: s.logger.WithField("syslog", "internal"), MaxMessageLen: s.config.MaxMessageLen} + s.server.SetChannel(c) + err := s.server.Listen(s.config.Addr, s.config.Port) + if err != nil { + return errors.Wrap(err, "could not start syslog server") + } + s.serverTomb = s.server.StartServer() + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/syslog/live") + return s.handleSyslogMsg(out, t, c) + }) + return nil +} + +func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, + appname string, pid string, msg string) string { + ret := "" + if !ts.IsZero() { + ret += ts.Format("Jan 2 15:04:05") + } else { + s.logger.Tracef("%s - missing TS", msg) + ret += time.Now().UTC().Format("Jan 2 15:04:05") + } + if hostname != "" { + ret += " " + hostname + } else { + s.logger.Tracef("%s - missing host", msg) + ret += " unknownhost" + } + if appname != "" { + ret += " " + appname + } + if pid != "" { + ret += "[" + pid + "]: " + } else { + ret += ": " + } + if msg != "" { + ret += msg + } + return ret + +} + +func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error { + killed := false + for { + select { + case <-t.Dying(): + if !killed { + s.logger.Info("Syslog datasource is dying") + s.serverTomb.Kill(nil) + killed = true + } + case <-s.serverTomb.Dead(): + s.logger.Info("Syslog server has exited") + return nil + case syslogLine := <-c: + var line string + var ts time.Time + + logger := s.logger.WithField("client", syslogLine.Client) + logger.Tracef("raw: %s", syslogLine) + linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + p := rfc3164.NewRFC3164Parser(rfc3164.WithCurrentYear()) + err := p.Parse(syslogLine.Message) + if err != nil { + logger.Debugf("could not parse as RFC3164 (%s)", err) + p2 := rfc5424.NewRFC5424Parser() + err = p2.Parse(syslogLine.Message) + if err != nil { + logger.Errorf("could not parse message: %s", err) + logger.Debugf("could not parse as RFC5424 (%s) : %s", err, syslogLine.Message) + continue + } + ts = p2.Timestamp //keep the parsed timestamp so l.Time below is not left at its zero value + line = s.buildLogFromSyslog(p2.Timestamp, p2.Hostname, p2.Tag, p2.PID, p2.Message) + } else { + ts = p.Timestamp + line = s.buildLogFromSyslog(p.Timestamp, p.Hostname, p.Tag, p.PID, p.Message) + } + + line = strings.TrimSuffix(line, "\n") + + l := types.Line{} + l.Raw = line + l.Module = s.GetName() + l.Labels = s.config.Labels + l.Time = ts + l.Src = syslogLine.Client + l.Process = true + if 
!s.config.UseTimeMachine { + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} + } else { + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + } + } +} diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go new file mode 100644 index 0000000..1ac7051 --- /dev/null +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -0,0 +1,171 @@ +package syslogacquisition + +import ( + "fmt" + "net" + "runtime" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/stretchr/testify/assert" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + config string + expectedErr string + }{ + { + config: ` +foobar: bla +source: syslog`, + expectedErr: "line 2: field foobar not found in type syslogacquisition.SyslogConfiguration", + }, + { + config: `source: syslog`, + expectedErr: "", + }, + { + config: ` +source: syslog +listen_port: asd`, + expectedErr: "cannot unmarshal !!str `asd` into int", + }, + { + config: ` +source: syslog +listen_port: 424242`, + expectedErr: "invalid port 424242", + }, + { + config: ` +source: syslog +listen_addr: 10.0.0`, + expectedErr: "invalid listen IP 10.0.0", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "syslog", + }) + for _, test := range tests { + s := SyslogSource{} + err := s.Configure([]byte(test.config), subLogger) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func writeToSyslog(logs []string) { + conn, err := net.Dial("udp", "127.0.0.1:4242") + if err != nil { + fmt.Printf("could not establish connection to syslog server : %s", err) + return + } + for _, log := range logs { + n, err := fmt.Fprint(conn, log) + if err != nil { + fmt.Printf("could not write to syslog server : %s", err) + return + } + if n != len(log) { + fmt.Printf("incomplete write to syslog server : wrote %d bytes out of %d", n, len(log)) + return + } + } +} + +func TestStreamingAcquisition(t *testing.T) { + tests := []struct { + name string + config string + expectedErr string + logs []string + expectedLines int + }{ + { + name: "invalid msgs", + config: `source: syslog +listen_port: 4242 +listen_addr: 127.0.0.1`, + logs: []string{"foobar", "bla", "pouet"}, + }, + { + name: "RFC5424", + config: `source: syslog +listen_port: 4242 +listen_addr: 127.0.0.1`, + expectedLines: 2, + logs: []string{`<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`}, + }, + { + name: "RFC3164", + config: `source: syslog +listen_port: 4242 +listen_addr: 127.0.0.1`, + expectedLines: 3, + logs: []string{`<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2`, + `<13>May 18 12:37:56 mantis sshd: blabla2`, + `<13>May 18 12:37:56 mantis sshd`}, + }, + } + if runtime.GOOS != "windows" { + tests = append(tests, struct { + name string + config string + expectedErr string + logs []string + expectedLines int + }{ + name: "privileged port", + config: `source: syslog`, + expectedErr: "could not start syslog server: could not listen on port 514: listen udp 127.0.0.1:514: bind: permission denied", + }) + } + + for _, ts := range tests { + ts := ts + t.Run(ts.name, func(t *testing.T) { + subLogger := log.WithFields(log.Fields{ + "type": 
"syslog", + }) + s := SyslogSource{} + err := s.Configure([]byte(ts.config), subLogger) + if err != nil { + t.Fatalf("could not configure syslog source : %s", err) + } + tomb := tomb.Tomb{} + out := make(chan types.Event) + err = s.StreamingAcquisition(out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) + if ts.expectedErr != "" { + return + } + if err != nil && ts.expectedErr == "" { + t.Fatalf("unexpected error while starting syslog server: %s", err) + return + } + + actualLines := 0 + go writeToSyslog(ts.logs) + READLOOP: + for { + select { + case <-out: + actualLines++ + case <-time.After(2 * time.Second): + break READLOOP + } + } + assert.Equal(t, ts.expectedLines, actualLines) + tomb.Kill(nil) + tomb.Wait() + }) + } +} diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go new file mode 100644 index 0000000..92bbd7b --- /dev/null +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -0,0 +1,59 @@ +//go:build !windows + +package wineventlogacquisition + +import ( + "errors" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" +) + +type WinEventLogSource struct{} + +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { + return nil +} + +func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + return nil +} + +func (w *WinEventLogSource) GetMode() string { + return "" +} + +func (w *WinEventLogSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} + +func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + return nil +} + +func (w *WinEventLogSource) GetMetrics() []prometheus.Collector { + return nil +} + +func (w *WinEventLogSource) GetAggregMetrics() []prometheus.Collector { + return nil +} + +func (w *WinEventLogSource) GetName() string { + return "wineventlog" +} + +func (w *WinEventLogSource) CanRun() error { + return errors.New("windows event log acquisition is only supported on Windows") +} + +func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + return nil +} + +func (w *WinEventLogSource) Dump() interface{} { + return w +} diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go new file mode 100644 index 0000000..a24a63e --- /dev/null +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -0,0 +1,233 @@ +//go:build windows +// +build windows + +package wineventlogacquisition + +import ( + "runtime" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows/svc/eventlog" + "gopkg.in/tomb.v2" +) + +func TestBadConfiguration(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping test on non-windows OS") + } + tests := []struct { + config string + expectedErr string + }{ + { + config: `source: wineventlog +foobar: 42`, + expectedErr: "field foobar not found in type wineventlogacquisition.WinEventLogConfiguration", + }, + { + config: `source: wineventlog`, + expectedErr: "event_channel or xpath_query must be set", + }, + { + config: `source: wineventlog +event_channel: 
Security +event_level: blabla`, + expectedErr: "buildXpathQuery failed: invalid log level", + }, + { + config: `source: wineventlog +event_channel: foo +xpath_query: test`, + expectedErr: "event_channel and xpath_query are mutually exclusive", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "windowseventlog", + }) + for _, test := range tests { + f := WinEventLogSource{} + err := f.Configure([]byte(test.config), subLogger) + assert.Contains(t, err.Error(), test.expectedErr) + } +} + +func TestQueryBuilder(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping test on non-windows OS") + } + //expectedQuery mirrors the exact xml.Marshal output of QueryList (Query>Select) built by buildXpathQuery + tests := []struct { + config string + expectedQuery string + expectedErr string + }{ + { + config: `source: wineventlog +event_channel: Security +event_level: Information`, + expectedQuery: "<QueryList><Query><Select Path=\"Security\">*[System[(Level=0 or Level=4)]]</Select></Query></QueryList>", + expectedErr: "", + }, + { + config: `source: wineventlog +event_channel: Security +event_level: Error +event_ids: + - 42`, + expectedQuery: "<QueryList><Query><Select Path=\"Security\">*[System[(EventID=42) and (Level=2)]]</Select></Query></QueryList>", + expectedErr: "", + }, + { + config: `source: wineventlog +event_channel: Security +event_level: Error +event_ids: + - 42 + - 43`, + expectedQuery: "<QueryList><Query><Select Path=\"Security\">*[System[(EventID=42 or EventID=43) and (Level=2)]]</Select></Query></QueryList>", + expectedErr: "", + }, + { + config: `source: wineventlog +event_channel: Security`, + expectedQuery: "<QueryList><Query><Select Path=\"Security\">*</Select></Query></QueryList>", + expectedErr: "", + }, + { + config: `source: wineventlog +event_channel: Security +event_level: bla`, + expectedQuery: "", + expectedErr: "invalid log level", + }, + } + subLogger := log.WithFields(log.Fields{ + "type": "windowseventlog", + }) + for _, test := range tests { + f := WinEventLogSource{} + f.Configure([]byte(test.config), subLogger) + q, err := f.buildXpathQuery() + if test.expectedErr != "" { + if err == nil { + t.Fatalf("expected error '%s' but got none", test.expectedErr) + } + assert.Contains(t, err.Error(), test.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedQuery, q) + } + } +} + +func TestLiveAcquisition(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping test on non-windows OS") + } + + tests := []struct { + config string + expectedLines []string + }{ + { + config: `source: wineventlog +xpath_query: | + + + + + `, + expectedLines: []string{ + "blabla", + "test", + "aaaa", + "bbbbb", + }, + }, + { + config: `source: wineventlog +xpath_query: | + asdfsdf`, + expectedLines: nil, + }, + { + config: `source: wineventlog +event_channel: Application +event_level: Information +event_ids: + - 42`, + expectedLines: []string{ + "testmessage", + }, + }, + { + config: `source: wineventlog +event_channel: Application +event_level: Information +event_ids: + - 43`, + expectedLines: nil, + }, + } + subLogger := log.WithFields(log.Fields{ + "type": "windowseventlog", + }) + + evthandler, err := eventlog.Open("Application") + + if err != nil { + t.Fatalf("failed to open event log: %s", err) + } + + for _, test := range tests { + to := &tomb.Tomb{} + c := make(chan types.Event) + f := WinEventLogSource{} + f.Configure([]byte(test.config), subLogger) + f.StreamingAcquisition(c, to) + time.Sleep(time.Second) + lines := test.expectedLines + go func() { + for _, line := range lines { + evthandler.Info(42, line) + } + }() + ticker := time.NewTicker(time.Second * 5) + linesRead := make([]string, 0) + READLOOP: + for { + select { + case <-ticker.C: + if test.expectedLines == nil { + break READLOOP + } + t.Fatalf("timeout") + case e := <-c: + + linesRead = append(linesRead, 
exprhelpers.XMLGetNodeValue(e.Line.Raw, "/Event/EventData[1]/Data")) + if len(linesRead) == len(lines) { + break READLOOP + } + } + } + if test.expectedLines == nil { + assert.Equal(t, 0, len(linesRead)) + } else { + assert.Equal(t, len(test.expectedLines), len(linesRead)) + assert.Equal(t, test.expectedLines, linesRead) + } + to.Kill(nil) + to.Wait() + } +} diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go new file mode 100644 index 0000000..7e7bb57 --- /dev/null +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -0,0 +1,320 @@ +package wineventlogacquisition + +import ( + "encoding/xml" + "errors" + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/google/winops/winlog" + "github.com/google/winops/winlog/wevtapi" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +type WinEventLogConfiguration struct { + configuration.DataSourceCommonCfg `yaml:",inline"` + EventChannel string `yaml:"event_channel"` + EventLevel string `yaml:"event_level"` + EventIDs []int `yaml:"event_ids"` + XPathQuery string `yaml:"xpath_query"` + EventFile string `yaml:"event_file"` + PrettyName string `yaml:"pretty_name"` +} + +type WinEventLogSource struct { + config WinEventLogConfiguration + logger *log.Entry + evtConfig *winlog.SubscribeConfig + query string + name string +} + +type QueryList struct { + Select Select `xml:"Query>Select"` +} + +type Select struct { + Path string `xml:"Path,attr"` + Query string `xml:",chardata"` +} + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_winevtlogsource_hits_total", + Help: "Total event that were read.", + }, + []string{"source"}) + +func logLevelToInt(logLevel string) ([]string, error) { + switch strings.ToUpper(logLevel) { + case "CRITICAL": + return []string{"1"}, nil + case "ERROR": + return []string{"2"}, nil + case "WARNING": + return []string{"3"}, nil + case "INFORMATION": + return []string{"0", "4"}, nil + case "VERBOSE": + return []string{"5"}, nil + default: + return nil, errors.New("invalid log level") + } +} + +//This is lifted from winops/winlog, but we only want to render the basic XML string, we don't need the extra fluff +func (w *WinEventLogSource) getXMLEvents(config *winlog.SubscribeConfig, publisherCache map[string]windows.Handle, resultSet windows.Handle, maxEvents int) ([]string, error) { + var events = make([]windows.Handle, maxEvents) + var returned uint32 + + // Get handles to events from the result set. + err := wevtapi.EvtNext( + resultSet, // Handle to query or subscription result set. + uint32(len(events)), // The number of events to attempt to retrieve. + &events[0], // Pointer to the array of event handles. + 2000, // Timeout in milliseconds to wait. + 0, // Reserved. Must be zero. + &returned) // The number of handles in the array that are set by the API. + if err == windows.ERROR_NO_MORE_ITEMS { + return nil, err + } else if err != nil { + return nil, fmt.Errorf("wevtapi.EvtNext failed: %v", err) + } + + // Event handles must be closed after they are returned by EvtNext whether or not we use them. 
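+	// The deferred close below covers events[:returned] so the handles are
+	// released even when rendering fails; only the first `returned` entries
+	// of the fixed-size slice are valid handles.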
+ defer func() { + for _, event := range events[:returned] { + winlog.Close(event) + } + }() + + // Render events. + var renderedEvents []string + for _, event := range events[:returned] { + // Render the basic XML representation of the event. + fragment, err := winlog.RenderFragment(event, wevtapi.EvtRenderEventXml) + if err != nil { + w.logger.Errorf("Failed to render event with RenderFragment, skipping: %v", err) + continue + } + w.logger.Tracef("Rendered event: %s", fragment) + renderedEvents = append(renderedEvents, fragment) + } + return renderedEvents, err +} + +func (w *WinEventLogSource) buildXpathQuery() (string, error) { + var query string + queryComponents := [][]string{} + if w.config.EventIDs != nil { + eventIds := []string{} + for _, id := range w.config.EventIDs { + eventIds = append(eventIds, fmt.Sprintf("EventID=%d", id)) + } + queryComponents = append(queryComponents, eventIds) + } + if w.config.EventLevel != "" { + levels, err := logLevelToInt(w.config.EventLevel) + logLevels := []string{} + if err != nil { + return "", err + } + for _, level := range levels { + logLevels = append(logLevels, fmt.Sprintf("Level=%s", level)) + } + queryComponents = append(queryComponents, logLevels) + } + if len(queryComponents) > 0 { + andList := []string{} + for _, component := range queryComponents { + andList = append(andList, fmt.Sprintf("(%s)", strings.Join(component, " or "))) + } + query = fmt.Sprintf("*[System[%s]]", strings.Join(andList, " and ")) + } else { + query = "*" + } + queryList := QueryList{Select: Select{Path: w.config.EventChannel, Query: query}} + xpathQuery, err := xml.Marshal(queryList) + if err != nil { + w.logger.Errorf("Marshal failed: %v", err) + return "", err + } + w.logger.Debugf("xpathQuery: %s", xpathQuery) + return string(xpathQuery), nil +} + +func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error { + subscription, err := winlog.Subscribe(w.evtConfig) + if err != nil { + w.logger.Errorf("Failed to subscribe to event log: %s", err) + return err + } + defer winlog.Close(subscription) + publisherCache := make(map[string]windows.Handle) + defer func() { + for _, h := range publisherCache { + winlog.Close(h) + } + }() + for { + select { + case <-t.Dying(): + w.logger.Infof("wineventlog is dying") + return nil + default: + status, err := windows.WaitForSingleObject(w.evtConfig.SignalEvent, 1000) + if err != nil { + w.logger.Errorf("WaitForSingleObject failed: %s", err) + return err + } + if status == syscall.WAIT_OBJECT_0 { + renderedEvents, err := w.getXMLEvents(w.evtConfig, publisherCache, subscription, 500) + if err == windows.ERROR_NO_MORE_ITEMS { + windows.ResetEvent(w.evtConfig.SignalEvent) + } else if err != nil { + w.logger.Errorf("getXMLEvents failed: %v", err) + continue + } + for _, event := range renderedEvents { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + l := types.Line{} + l.Raw = event + l.Module = w.GetName() + l.Labels = w.config.Labels + l.Time = time.Now() + l.Src = w.name + l.Process = true + if !w.config.UseTimeMachine { + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.LIVE} + } else { + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: leaky.TIMEMACHINE} + } + } + } + + } + } +} + +func (w *WinEventLogSource) generateConfig(query string) (*winlog.SubscribeConfig, error) { + var config winlog.SubscribeConfig + var err error + + // Create a subscription signaler. 
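+	// The event is manual-reset and created in the signaled state, so the
+	// first WaitForSingleObject in getEvents returns immediately; getEvents
+	// calls windows.ResetEvent once EvtNext reports ERROR_NO_MORE_ITEMS.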
+ config.SignalEvent, err = windows.CreateEvent( + nil, // Default security descriptor. + 1, // Manual reset. + 1, // Initial state is signaled. + nil) // Optional name. + if err != nil { + return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + } + config.Flags = wevtapi.EvtSubscribeToFutureEvents + config.Query, err = syscall.UTF16PtrFromString(query) + if err != nil { + return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) + } + + return &config, nil +} + +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { + + config := WinEventLogConfiguration{} + w.logger = logger + err := yaml.UnmarshalStrict(yamlConfig, &config) + + if err != nil { + return fmt.Errorf("unable to parse configuration: %v", err) + } + + if config.EventChannel != "" && config.XPathQuery != "" { + return fmt.Errorf("event_channel and xpath_query are mutually exclusive") + } + + if config.EventChannel == "" && config.XPathQuery == "" { + return fmt.Errorf("event_channel or xpath_query must be set") + } + + config.Mode = configuration.TAIL_MODE + w.config = config + + if config.XPathQuery != "" { + w.query = config.XPathQuery + } else { + w.query, err = w.buildXpathQuery() + if err != nil { + return fmt.Errorf("buildXpathQuery failed: %v", err) + } + } + + w.evtConfig, err = w.generateConfig(w.query) + if err != nil { + return err + } + + if config.PrettyName != "" { + w.name = config.PrettyName + } else { + w.name = w.query + } + + return nil +} + +func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry) error { + return nil +} + +func (w *WinEventLogSource) GetMode() string { + return w.config.Mode +} + +func (w *WinEventLogSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE} +} + +func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + return nil +} + +func (w *WinEventLogSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (w *WinEventLogSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (w *WinEventLogSource) GetName() string { + return "wineventlog" +} + +func (w *WinEventLogSource) CanRun() error { + if runtime.GOOS != "windows" { + return errors.New("windows event log acquisition is only supported on Windows") + } + return nil +} + +func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + t.Go(func() error { + defer types.CatchPanic("crowdsec/acquis/wineventlog/streaming") + return w.getEvents(out, t) + }) + return nil +} + +func (w *WinEventLogSource) Dump() interface{} { + return w +} diff --git a/pkg/acquisition/test_files/backward_compat.yaml b/pkg/acquisition/test_files/backward_compat.yaml new file mode 100644 index 0000000..d56bf9a --- /dev/null +++ b/pkg/acquisition/test_files/backward_compat.yaml @@ -0,0 +1,15 @@ +filename: /tmp/test.log +labels: + type: syslog +--- +filenames: + - /tmp/test*.log +labels: + type: syslog +--- +# to be uncommented when we reimplement back journalctl +# journalctl_filter: +# - "_SYSTEMD_UNIT=ssh.service" +# labels: +# type: syslog +--- diff --git a/pkg/acquisition/test_files/bad_filetype.yaml b/pkg/acquisition/test_files/bad_filetype.yaml new file mode 100644 index 0000000..c7bd131 --- /dev/null +++ b/pkg/acquisition/test_files/bad_filetype.yaml @@ -0,0 +1,5 @@ +type: file +filenames: /tmp/tltlt.log #it should be an array +labels: + type: syslog + \ No newline at end of 
file diff --git a/pkg/acquisition/test_files/bad_source.yaml b/pkg/acquisition/test_files/bad_source.yaml new file mode 100644 index 0000000..fdf6b32 --- /dev/null +++ b/pkg/acquisition/test_files/bad_source.yaml @@ -0,0 +1,4 @@ +source: does_not_exist +labels: + type: syslog +foobar: toto diff --git a/pkg/acquisition/test_files/badyaml.yaml b/pkg/acquisition/test_files/badyaml.yaml new file mode 100644 index 0000000..7545af9 --- /dev/null +++ b/pkg/acquisition/test_files/badyaml.yaml @@ -0,0 +1 @@ + 0 { + URI = fmt.Sprintf("%s?%s", u, params.Encode()) + } else { + URI = u + } + + req, err := s.client.NewRequest(http.MethodGet, URI, nil) + if err != nil { + return nil, nil, errors.Wrap(err, "building request") + } + + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, errors.Wrap(err, "performing request") + } + return &alerts, resp, nil +} + +// to demo query arguments +func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*models.DeleteAlertsResponse, *Response, error) { + var alerts models.DeleteAlertsResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/alerts?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + return &alerts, resp, nil +} + +func (s *AlertsService) DeleteOne(ctx context.Context, alert_id string) (*models.DeleteAlertsResponse, *Response, error) { + var alerts models.DeleteAlertsResponse + u := fmt.Sprintf("%s/alerts/%s", s.client.URLPrefix, alert_id) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + return &alerts, resp, nil +} + +func (s *AlertsService) GetByID(ctx context.Context, alertID int) (*models.Alert, *Response, error) { + var alert models.Alert + u := fmt.Sprintf("%s/alerts/%d", s.client.URLPrefix, alertID) + + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &alert) + if err != nil { + return nil, nil, err + } + return &alert, resp, nil +} diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go new file mode 100644 index 0000000..8043536 --- /dev/null +++ b/pkg/apiclient/alerts_service_test.go @@ -0,0 +1,494 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAlertsListAsMachine(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + log.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil 
{ + log.Fatalf("new api client: %s", err) + } + + defer teardown() + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + + if r.URL.RawQuery == "ip=1.2.3.4" { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `null`) + return + } + + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `[ + {"capacity":5,"created_at":"2020-11-28T10:20:47+01:00", + "decisions":[ + {"duration":"59m49.264032632s", + "id":1, + "origin":"crowdsec", + "scenario":"crowdsecurity/ssh-bf", + "scope":"Ip", + "simulated":false, + "type":"ban", + "value":"1.1.1.172"} + ], + "events":[ + {"meta":[ + {"key":"target_user","value":"netflix"}, + {"key":"service","value":"ssh"} + ], + "timestamp":"2020-11-28 10:20:46 +0000 UTC"}, + {"meta":[ + {"key":"target_user","value":"netflix"}, + {"key":"service","value":"ssh"} + ], + "timestamp":"2020-11-28 10:20:46 +0000 UTC"} + ], + "events_count":6, + "id":1, + "labels":null, + "leakspeed":"10s", + "machine_id":"test", + "message":"Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 2.920062ms) at 2020-11-28 10:20:46.845619968 +0100 CET m=+5.903899761", + "scenario":"crowdsecurity/ssh-bf", + "scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version":"0.1", + "simulated":false, + "source":{ + "as_name":"Cloudflare Inc", + "cn":"AU", + "ip":"1.1.1.172", + "latitude":-37.7, + "longitude":145.1833, + "range":"1.1.1.0/24", + "scope":"Ip", + "value":"1.1.1.172" + }, + "start_at":"2020-11-28 10:20:46.842701127 +0100 +0100", + "stop_at":"2020-11-28 10:20:46.845621385 +0100 +0100" + } + ]`) + }) + + tcapacity := int32(5) + tduration := "59m49.264032632s" + torigin := "crowdsec" + tscenario := "crowdsecurity/ssh-bf" + tscope := "Ip" + ttype := "ban" + tvalue := "1.1.1.172" + ttimestamp := "2020-11-28 10:20:46 +0000 UTC" + teventscount := int32(6) + tleakspeed := "10s" + tmessage := "Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 2.920062ms) at 2020-11-28 10:20:46.845619968 +0100 CET m=+5.903899761" + tscenariohash := "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f" + tscenarioversion := "0.1" + tstartat := "2020-11-28 10:20:46.842701127 +0100 +0100" + tstopat := "2020-11-28 10:20:46.845621385 +0100 +0100" + + expected := models.GetAlertsResponse{ + &models.Alert{ + Capacity: &tcapacity, + CreatedAt: "2020-11-28T10:20:47+01:00", + Decisions: []*models.Decision{ + &models.Decision{ + Duration: &tduration, + ID: 1, + Origin: &torigin, + Scenario: &tscenario, + + Scope: &tscope, + Simulated: new(bool), //false, + Type: &ttype, + Value: &tvalue, + }, + }, + Events: []*models.Event{ + &models.Event{ + Meta: models.Meta{ + &models.MetaItems0{ + Key: "target_user", + Value: "netflix", + }, + &models.MetaItems0{ + Key: "service", + Value: "ssh", + }, + }, + Timestamp: &ttimestamp, + }, + &models.Event{ + Meta: models.Meta{ + &models.MetaItems0{ + Key: "target_user", + Value: "netflix", + }, + &models.MetaItems0{ + Key: "service", + Value: "ssh", + }, + }, + Timestamp: &ttimestamp, + }, + }, + EventsCount: &teventscount, + ID: 1, + Leakspeed: &tleakspeed, + MachineID: "test", + Message: &tmessage, + Remediation: false, + Scenario: &tscenario, + ScenarioHash: &tscenariohash, + ScenarioVersion: &tscenarioversion, + Simulated: new(bool), //(false), + Source: &models.Source{ + AsName: "Cloudflare Inc", + AsNumber: "", + Cn: "AU", + IP: "1.1.1.172", + Latitude: -37.7, + Longitude: 145.1833, + Range: "1.1.1.0/24", + Scope: &tscope, + Value: &tvalue, + }, + 
StartAt: &tstartat, + StopAt: &tstopat, + }, + } + + //log.Debugf("data : -> %s", spew.Sdump(alerts)) + //log.Debugf("resp : -> %s", spew.Sdump(resp)) + //log.Debugf("expected : -> %s", spew.Sdump(expected)) + //first one returns data + alerts, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + if err != nil { + log.Errorf("test Unable to list alerts : %+v", err) + } + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + + if !reflect.DeepEqual(*alerts, expected) { + t.Errorf("client.Alerts.List returned %+v, want %+v", resp, expected) + } + //this one doesn't + filter := AlertsListOpts{IPEquals: new(string)} + *filter.IPEquals = "1.2.3.4" + alerts, resp, err = client.Alerts.List(context.Background(), filter) + if err != nil { + log.Errorf("test Unable to list alerts : %+v", err) + } + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + assert.Equal(t, 0, len(*alerts)) +} + +func TestAlertsGetAsMachine(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + log.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + log.Fatalf("new api client: %s", err) + } + + defer teardown() + mux.HandleFunc("/alerts/2", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusNotFound) + fmt.Fprintf(w, `{"message":"object not found"}`) + }) + + mux.HandleFunc("/alerts/1", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"capacity":5,"created_at":"2020-11-28T10:20:47+01:00", + "decisions":[ + {"duration":"59m49.264032632s", + "end_ip":16843180, + "id":1, + "origin":"crowdsec", + "scenario":"crowdsecurity/ssh-bf", + "scope":"Ip", + "simulated":false, + "start_ip":16843180, + "type":"ban", + "value":"1.1.1.172"} + ], + "events":[ + {"meta":[ + {"key":"target_user","value":"netflix"}, + {"key":"service","value":"ssh"} + ], + "timestamp":"2020-11-28 10:20:46 +0000 UTC"}, + {"meta":[ + {"key":"target_user","value":"netflix"}, + {"key":"service","value":"ssh"} + ], + "timestamp":"2020-11-28 10:20:46 +0000 UTC"} + ], + "events_count":6, + "id":1, + "labels":null, + "leakspeed":"10s", + "machine_id":"test", + "message":"Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 2.920062ms) at 2020-11-28 10:20:46.845619968 +0100 CET m=+5.903899761", + "scenario":"crowdsecurity/ssh-bf", + "scenario_hash":"4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version":"0.1", + "simulated":false, + "source":{ + "as_name":"Cloudflare Inc", + "cn":"AU", + "ip":"1.1.1.172", + "latitude":-37.7, + "longitude":145.1833, + "range":"1.1.1.0/24", + "scope":"Ip", + "value":"1.1.1.172" + }, + "start_at":"2020-11-28 10:20:46.842701127 +0100 +0100", + "stop_at":"2020-11-28 10:20:46.845621385 +0100 +0100" + }`) + }) + + tcapacity := int32(5) + tduration := 
"59m49.264032632s" + torigin := "crowdsec" + tscenario := "crowdsecurity/ssh-bf" + tscope := "Ip" + ttype := "ban" + tvalue := "1.1.1.172" + ttimestamp := "2020-11-28 10:20:46 +0000 UTC" + teventscount := int32(6) + tleakspeed := "10s" + tmessage := "Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 2.920062ms) at 2020-11-28 10:20:46.845619968 +0100 CET m=+5.903899761" + tscenariohash := "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f" + tscenarioversion := "0.1" + tstartat := "2020-11-28 10:20:46.842701127 +0100 +0100" + tstopat := "2020-11-28 10:20:46.845621385 +0100 +0100" + + expected := &models.Alert{ + Capacity: &tcapacity, + CreatedAt: "2020-11-28T10:20:47+01:00", + Decisions: []*models.Decision{ + &models.Decision{ + Duration: &tduration, + ID: 1, + Origin: &torigin, + Scenario: &tscenario, + + Scope: &tscope, + Simulated: new(bool), //false, + Type: &ttype, + Value: &tvalue, + }, + }, + Events: []*models.Event{ + &models.Event{ + Meta: models.Meta{ + &models.MetaItems0{ + Key: "target_user", + Value: "netflix", + }, + &models.MetaItems0{ + Key: "service", + Value: "ssh", + }, + }, + Timestamp: &ttimestamp, + }, + &models.Event{ + Meta: models.Meta{ + &models.MetaItems0{ + Key: "target_user", + Value: "netflix", + }, + &models.MetaItems0{ + Key: "service", + Value: "ssh", + }, + }, + Timestamp: &ttimestamp, + }, + }, + EventsCount: &teventscount, + ID: 1, + Leakspeed: &tleakspeed, + MachineID: "test", + Message: &tmessage, + Remediation: false, + Scenario: &tscenario, + ScenarioHash: &tscenariohash, + ScenarioVersion: &tscenarioversion, + Simulated: new(bool), //(false), + Source: &models.Source{ + AsName: "Cloudflare Inc", + AsNumber: "", + Cn: "AU", + IP: "1.1.1.172", + Latitude: -37.7, + Longitude: 145.1833, + Range: "1.1.1.0/24", + Scope: &tscope, + Value: &tvalue, + }, + StartAt: &tstartat, + StopAt: &tstopat, + } + + alerts, resp, err := client.Alerts.GetByID(context.Background(), 1) + require.NoError(t, err) + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + + if !reflect.DeepEqual(*alerts, *expected) { + t.Errorf("client.Alerts.List returned %+v, want %+v", resp, expected) + } + + //fail + _, _, err = client.Alerts.GetByID(context.Background(), 2) + assert.Contains(t, fmt.Sprintf("%s", err), "API error: object not found") + +} + +func TestAlertsCreateAsMachine(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`["3"]`)) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + log.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + log.Fatalf("new api client: %s", err) + } + + defer teardown() + alert := models.AddAlertsRequest{} + alerts, resp, err := client.Alerts.Add(context.Background(), alert) + require.NoError(t, err) + expected := &models.AddAlertsResponse{"3"} + if resp.Response.StatusCode != http.StatusOK { + 
t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + if !reflect.DeepEqual(*alerts, *expected) { + t.Errorf("client.Alerts.List returned %+v, want %+v", resp, expected) + } +} + +func TestAlertsDeleteAsMachine(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + assert.Equal(t, r.URL.RawQuery, "ip=1.2.3.4") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"message":"0 deleted alerts"}`)) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + log.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + log.Fatalf("new api client: %s", err) + } + + defer teardown() + alert := AlertsDeleteOpts{IPEquals: new(string)} + *alert.IPEquals = "1.2.3.4" + alerts, resp, err := client.Alerts.Delete(context.Background(), alert) + require.NoError(t, err) + + expected := &models.DeleteAlertsResponse{NbDeleted: ""} + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + if !reflect.DeepEqual(*alerts, *expected) { + t.Errorf("client.Alerts.List returned %+v, want %+v", resp, expected) + } +} diff --git a/pkg/apiclient/auth.go b/pkg/apiclient/auth.go new file mode 100644 index 0000000..87d725e --- /dev/null +++ b/pkg/apiclient/auth.go @@ -0,0 +1,229 @@ +package apiclient + +import ( + "bytes" + "encoding/json" + "time" + + //"errors" + "fmt" + "io" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + //"google.golang.org/appengine/log" +) + +type APIKeyTransport struct { + APIKey string + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper + URL *url.URL + VersionPrefix string + UserAgent string +} + +// RoundTrip implements the RoundTripper interface. +func (t *APIKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.APIKey == "" { + return nil, errors.New("APIKey is empty") + } + + // We must make a copy of the Request so + // that we don't modify the Request we were given. This is required by the + // specification of http.RoundTripper. + req = cloneRequest(req) + req.Header.Add("X-Api-Key", t.APIKey) + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + log.Debugf("req-api: %s %s", req.Method, req.URL.String()) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("auth-api request: %s", string(dump)) + } + // Make the HTTP request. 
+ resp, err := t.transport().RoundTrip(req) + if err != nil { + log.Errorf("auth-api: auth with api key failed return nil response, error: %s", err) + return resp, err + } + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("auth-api response: %s", string(dump)) + } + + log.Debugf("resp-api: http %d", resp.StatusCode) + + return resp, err +} + +func (t *APIKeyTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *APIKeyTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +type JWTTransport struct { + MachineID *string + Password *strfmt.Password + token string + Expiration time.Time + Scenarios []string + URL *url.URL + VersionPrefix string + UserAgent string + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper + UpdateScenario func() ([]string, error) +} + +func (t *JWTTransport) refreshJwtToken() error { + var err error + if t.UpdateScenario != nil { + t.Scenarios, err = t.UpdateScenario() + if err != nil { + return fmt.Errorf("can't update scenario list: %s", err) + } + log.Debugf("scenarios list updated for '%s'", *t.MachineID) + } + + var auth = models.WatcherAuthRequest{ + MachineID: t.MachineID, + Password: t.Password, + Scenarios: t.Scenarios, + } + + var response models.WatcherAuthResponse + + /* + we don't use the main client, so let's build the body + */ + var buf io.ReadWriter = &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err = enc.Encode(auth) + if err != nil { + return errors.Wrap(err, "could not encode jwt auth body") + } + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s/watchers/login", t.URL, t.VersionPrefix), buf) + if err != nil { + return errors.Wrap(err, "could not create request") + } + req.Header.Add("Content-Type", "application/json") + client := &http.Client{} + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("auth-jwt request: %s", string(dump)) + } + + log.Debugf("auth-jwt(auth): %s %s", req.Method, req.URL.String()) + + resp, err := client.Do(req) + if err != nil { + return errors.Wrap(err, "could not get jwt token") + } + log.Debugf("auth-jwt : http %d", resp.StatusCode) + + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("auth-jwt response: %s", string(dump)) + } + + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + log.Debugf("received response status %q when fetching %v", resp.Status, req.URL) + err = CheckResponse(resp) + if err != nil { + return err + } + } + + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return errors.Wrap(err, "unable to decode response") + } + if err := t.Expiration.UnmarshalText([]byte(response.Expire)); err != nil { + return errors.Wrap(err, "unable to parse jwt expiration") + } + t.token = response.Token + + log.Debugf("token %s will expire on %s", t.token, t.Expiration.String()) + return nil +} + +// RoundTrip implements the RoundTripper interface. 
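+// The token is fetched lazily on first use and refreshed once less than a
+// minute of validity remains. Callers normally go through the *http.Client
+// returned by Client(); a minimal sketch, with illustrative values:
+//
+//	t := &JWTTransport{MachineID: &machineID, Password: &password, URL: apiURL, VersionPrefix: "v1"}
+//	resp, err := t.Client().Do(req)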
+func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) { + if err := t.refreshJwtToken(); err != nil { + return nil, err + } + } + + // We must make a copy of the Request so + // that we don't modify the Request we were given. This is required by the + // specification of http.RoundTripper. + req = cloneRequest(req) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.token)) + log.Debugf("req-jwt: %s %s", req.Method, req.URL.String()) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("req-jwt: %s", string(dump)) + } + if t.UserAgent != "" { + req.Header.Add("User-Agent", t.UserAgent) + } + // Make the HTTP request. + resp, err := t.transport().RoundTrip(req) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("resp-jwt: %s (err:%v)", string(dump), err) + } + if err != nil || resp.StatusCode == 401 { + /*we had an error (network error for example, or 401 because token is refused), reset the token ?*/ + t.token = "" + return resp, errors.Wrapf(err, "performing jwt auth") + } + log.Debugf("resp-jwt: %d", resp.StatusCode) + return resp, nil +} + +func (t *JWTTransport) Client() *http.Client { + return &http.Client{Transport: t} +} + +func (t *JWTTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// cloneRequest returns a clone of the provided *http.Request. The clone is a +// shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go new file mode 100644 index 0000000..bf02738 --- /dev/null +++ b/pkg/apiclient/auth_service.go @@ -0,0 +1,80 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// type ApiAlerts service + +type AuthService service + +// Don't add it to the models, as they are used with LAPI, but the enroll endpoint is specific to CAPI +type enrollRequest struct { + EnrollKey string `json:"attachment_key"` + Name string `json:"name"` + Tags []string `json:"tags"` + Overwrite bool `json:"overwrite"` +} + +func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) { + + u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *AuthService) RegisterWatcher(ctx context.Context, registration models.WatcherRegistrationRequest) (*Response, error) { + + u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) + + req, err := s.client.NewRequest(http.MethodPost, u, ®istration) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.WatcherAuthRequest) (*Response, error) { + u := fmt.Sprintf("%s/watchers/login", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodPost, u, &auth) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *AuthService) EnrollWatcher(ctx context.Context, enrollKey string, name string, tags []string, overwrite bool) (*Response, error) { + u := fmt.Sprintf("%s/watchers/enroll", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodPost, u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags, Overwrite: overwrite}) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go new file mode 100644 index 0000000..1eb5138 --- /dev/null +++ b/pkg/apiclient/auth_service_test.go @@ -0,0 +1,238 @@ +package apiclient + +import ( + "bytes" + "context" + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestWatcherAuth(t *testing.T) { + + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(r.Body) + newStr := buf.String() + log.Printf("--> %s", newStr) + if newStr == `{"machine_id":"test_login","password":"test_password","scenarios":["crowdsecurity/test"]} +` { + log.Printf("ok cool") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}`) + } else { + w.WriteHeader(http.StatusForbidden) + log.Printf("badbad") + 
fmt.Fprintf(w, `{"message":"access forbidden"}`) + } + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + //ok auth + mycfg := &Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + Scenarios: []string{"crowdsecurity/test"}, + } + client, err := NewClient(mycfg) + + if err != nil { + t.Fatalf("new api client: %s", err) + } + + _, err = client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + MachineID: &mycfg.MachineID, + Password: &mycfg.Password, + Scenarios: mycfg.Scenarios, + }) + if err != nil { + t.Fatalf("unexpect auth err 0: %s", err) + } + + //bad auth + mycfg = &Config{ + MachineID: "BADtest_login", + Password: "BADtest_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + Scenarios: []string{"crowdsecurity/test"}, + } + client, err = NewClient(mycfg) + + if err != nil { + t.Fatalf("new api client: %s", err) + } + + _, err = client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + MachineID: &mycfg.MachineID, + Password: &mycfg.Password, + }) + assert.Contains(t, err.Error(), "API error: access forbidden") + +} + +func TestWatcherRegister(t *testing.T) { + + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(r.Body) + newStr := buf.String() + assert.Equal(t, newStr, `{"machine_id":"test_login","password":"test_password"} +`) + w.WriteHeader(http.StatusOK) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + if err != nil { + t.Fatalf("while registering client : %s", err) + } + log.Printf("->%T", client) +} + +func TestWatcherUnregister(t *testing.T) { + + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + assert.Equal(t, r.ContentLength, int64(0)) + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(r.Body) + newStr := buf.String() + if newStr == `{"machine_id":"test_login","password":"test_password","scenarios":["crowdsecurity/test"]} +` { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}`) + } else { + w.WriteHeader(http.StatusForbidden) + fmt.Fprintf(w, `{"message":"access forbidden"}`) + } + }) + + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + mycfg := &Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: 
fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + Scenarios: []string{"crowdsecurity/test"}, + } + client, err := NewClient(mycfg) + + if err != nil { + t.Fatalf("new api client: %s", err) + } + _, err = client.Auth.UnregisterWatcher(context.Background()) + if err != nil { + t.Fatalf("while registering client : %s", err) + } + log.Printf("->%T", client) +} + +func TestWatcherEnroll(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + + mux.HandleFunc("/watchers/enroll", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(r.Body) + newStr := buf.String() + log.Debugf("body -> %s", newStr) + if newStr == `{"attachment_key":"goodkey","name":"","tags":[],"overwrite":false} +` { + log.Print("good key") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"statusCode": 200, "message": "OK"}`) + } else { + log.Print("bad key") + w.WriteHeader(http.StatusForbidden) + fmt.Fprintf(w, `{"message":"the attachment key provided is not valid"}`) + } + }) + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}`) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + mycfg := &Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + Scenarios: []string{"crowdsecurity/test"}, + } + client, err := NewClient(mycfg) + + if err != nil { + t.Fatalf("new api client: %s", err) + } + + _, err = client.Auth.EnrollWatcher(context.Background(), "goodkey", "", []string{}, false) + if err != nil { + t.Fatalf("unexpect enroll err: %s", err) + } + + _, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{}, false) + assert.Contains(t, err.Error(), "the attachment key provided is not valid") +} diff --git a/pkg/apiclient/auth_test.go b/pkg/apiclient/auth_test.go new file mode 100644 index 0000000..f28a0ea --- /dev/null +++ b/pkg/apiclient/auth_test.go @@ -0,0 +1,86 @@ +package apiclient + +import ( + "context" + "net/http" + "net/url" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestApiAuth(t *testing.T) { + log.SetLevel(log.TraceLevel) + + mux, urlx, teardown := setup() + mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + if r.Header.Get("X-Api-Key") == "ixu" { + assert.Equal(t, r.URL.RawQuery, "ip=1.2.3.4") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`null`)) + } else { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"message":"access forbidden"}`)) + } + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + defer teardown() + + //ok no answer + auth := &APIKeyTransport{ + APIKey: "ixu", + } + + newcli, err := NewDefaultClient(apiURL, "v1", "toto", auth.Client()) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + alert := DecisionsListOpts{IPEquals: new(string)} + *alert.IPEquals = "1.2.3.4" + _, resp, err := newcli.Decisions.List(context.Background(), alert) + require.NoError(t, err) + + if resp.Response.StatusCode != 
http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + + //ko bad token + auth = &APIKeyTransport{ + APIKey: "bad", + } + + newcli, err = NewDefaultClient(apiURL, "v1", "toto", auth.Client()) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + _, resp, err = newcli.Decisions.List(context.Background(), alert) + + log.Infof("--> %s", err) + if resp.Response.StatusCode != http.StatusForbidden { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + assert.Contains(t, err.Error(), "API error: access forbidden") + //ko empty token + auth = &APIKeyTransport{} + newcli, err = NewDefaultClient(apiURL, "v1", "toto", auth.Client()) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + _, _, err = newcli.Decisions.List(context.Background(), alert) + require.Error(t, err) + + log.Infof("--> %s", err) + assert.Contains(t, err.Error(), "APIKey is empty") + +} diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go new file mode 100644 index 0000000..f6cc738 --- /dev/null +++ b/pkg/apiclient/client.go @@ -0,0 +1,179 @@ +package apiclient + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" +) + +var ( + InsecureSkipVerify = false + Cert *tls.Certificate + CaCertPool *x509.CertPool +) + +type ApiClient struct { + /*The http client used to make requests*/ + client *http.Client + /*Reuse a single struct instead of allocating one for each service on the heap.*/ + common service + /*config stuff*/ + BaseURL *url.URL + URLPrefix string + UserAgent string + /*exposed Services*/ + Decisions *DecisionsService + Alerts *AlertsService + Auth *AuthService + Metrics *MetricsService + Signal *SignalService + HeartBeat *HeartBeatService +} + +type service struct { + client *ApiClient +} + +func NewClient(config *Config) (*ApiClient, error) { + t := &JWTTransport{ + MachineID: &config.MachineID, + Password: &config.Password, + Scenarios: config.Scenarios, + URL: config.URL, + UserAgent: config.UserAgent, + VersionPrefix: config.VersionPrefix, + UpdateScenario: config.UpdateScenario, + } + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + if Cert != nil { + tlsconfig.RootCAs = CaCertPool + tlsconfig.Certificates = []tls.Certificate{*Cert} + } + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + c.Metrics = (*MetricsService)(&c.common) + c.Signal = (*SignalService)(&c.common) + c.HeartBeat = (*HeartBeatService)(&c.common) + + return c, nil +} + +func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + if client == nil { + client = &http.Client{} + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + if Cert != nil { + tlsconfig.RootCAs = CaCertPool + tlsconfig.Certificates = []tls.Certificate{*Cert} + } + ht.TLSClientConfig = &tlsconfig + client.Transport = ht + } + } + c := &ApiClient{client: client, BaseURL: URL, UserAgent: userAgent, URLPrefix: prefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + 
c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + c.Metrics = (*MetricsService)(&c.common) + c.Signal = (*SignalService)(&c.common) + c.HeartBeat = (*HeartBeatService)(&c.common) + + return c, nil +} + +func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { + if client == nil { + client = &http.Client{} + } + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + if Cert != nil { + tlsconfig.RootCAs = CaCertPool + tlsconfig.Certificates = []tls.Certificate{*Cert} + } + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + c := &ApiClient{client: client, BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c.common.client = c + c.Decisions = (*DecisionsService)(&c.common) + c.Alerts = (*AlertsService)(&c.common) + c.Auth = (*AuthService)(&c.common) + + resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password}) + /*if we have http status, return it*/ + if err != nil { + if resp != nil && resp.Response != nil { + return nil, errors.Wrapf(err, "api register (%s) http %s : %s", c.BaseURL, resp.Response.Status, err) + } + return nil, errors.Wrapf(err, "api register (%s) : %s", c.BaseURL, err) + } + return c, nil + +} + +type Response struct { + Response *http.Response + //add our pagination stuff + //NextPage int + //... +} + +type ErrorResponse struct { + models.ErrorResponse +} + +func (e *ErrorResponse) Error() string { + err := fmt.Sprintf("API error: %s", *e.Message) + if len(e.Errors) > 0 { + err += fmt.Sprintf(" (%s)", e.Errors) + } + return err +} + +func newResponse(r *http.Response) *Response { + response := &Response{Response: r} + return response +} + +func CheckResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 { + return nil + } + errorResponse := &ErrorResponse{} + data, err := io.ReadAll(r.Body) + if err == nil && data != nil { + err := json.Unmarshal(data, errorResponse) + if err != nil { + return errors.Wrapf(err, "http code %d, invalid body", r.StatusCode) + } + } else { + errorResponse.Message = new(string) + *errorResponse.Message = fmt.Sprintf("http code %d, no error message", r.StatusCode) + } + return errorResponse +} + +type ListOpts struct { + //Page int + //PerPage int +} + +type DeleteOpts struct { + //?? +} + +type AddOpts struct { + //?? 
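+	// Like ListOpts and DeleteOpts above, this is a placeholder for future
+	// query parameters (pagination, filters, ...).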
+} diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go new file mode 100644 index 0000000..bfe328b --- /dev/null +++ b/pkg/apiclient/client_http.go @@ -0,0 +1,119 @@ +package apiclient + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httputil" + "net/url" + "strings" + + log "github.com/sirupsen/logrus" +) + +func (c *ApiClient) NewRequest(method, url string, body interface{}) (*http.Request, error) { + if !strings.HasSuffix(c.BaseURL.Path, "/") { + return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) + } + u, err := c.BaseURL.Parse(url) + if err != nil { + return nil, err + } + + var buf io.ReadWriter + if body != nil { + buf = &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + return req, nil +} + +func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { + if ctx == nil { + return nil, errors.New("context must be non-nil") + } + req = req.WithContext(ctx) + + // Check rate limit + + if c.UserAgent != "" { + req.Header.Add("User-Agent", c.UserAgent) + } + + resp, err := c.client.Do(req) + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + + if err != nil { + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + // If the error type is *url.Error, sanitize its URL before returning. + if e, ok := err.(*url.Error); ok { + if url, err := url.Parse(e.URL); err == nil { + e.URL = url.String() + return newResponse(resp), e + } + return newResponse(resp), err + } + return newResponse(resp), err + } + + if log.GetLevel() >= log.DebugLevel { + for k, v := range resp.Header { + log.Debugf("[headers] %s : %s", k, v) + } + + dump, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Debugf("Response: %s", string(dump)) + } + } + + response := newResponse(resp) + + err = CheckResponse(resp) + if err != nil { + return response, err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + io.Copy(w, resp.Body) + } else { + decErr := json.NewDecoder(resp.Body).Decode(v) + if decErr == io.EOF { + decErr = nil // ignore EOF errors caused by empty response body + } + if decErr != nil { + err = decErr + } + } + } + return response, err +} diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go new file mode 100644 index 0000000..f9ad572 --- /dev/null +++ b/pkg/apiclient/client_http_test.go @@ -0,0 +1,76 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/stretchr/testify/assert" +) + +func TestNewRequestInvalid(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + //missing slash in uri + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + 
mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + }) + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + }) + + _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + assert.Contains(t, err.Error(), `building request: BaseURL must have a trailing slash, but `) +} + +func TestNewRequestTimeout(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + //missing slash in uri + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(2 * time.Second) + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) + assert.Contains(t, err.Error(), `performing request: context deadline exceeded`) +} diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go new file mode 100644 index 0000000..7bbc08d --- /dev/null +++ b/pkg/apiclient/client_test.go @@ -0,0 +1,205 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + log "github.com/sirupsen/logrus" +) + +/*this is a ripoff of google/go-github approach : +- setup a test http server along with a client that is configured to talk to test server +- each test will then bind handler for the method(s) they want to try +*/ + +func setup() (mux *http.ServeMux, serverURL string, teardown func()) { + // mux is the HTTP request multiplexer used with the test server. + mux = http.NewServeMux() + baseURLPath := "/v1" + + apiHandler := http.NewServeMux() + apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) + + // server is a test HTTP server used to provide mock API responses. 
+ server := httptest.NewServer(apiHandler) + + return mux, server.URL, server.Close +} + +func testMethod(t *testing.T, r *http.Request, want string) { + t.Helper() + if got := r.Method; got != want { + t.Errorf("Request method: %v, want %v", got, want) + } +} + +func TestNewClientOk(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + }) + + _, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + if err != nil { + t.Fatalf("test Unable to list alerts : %+v", err) + } + if resp.Response.StatusCode != http.StatusOK { + t.Fatalf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusCreated) + } +} + +func TestNewClientKo(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + }) + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + }) + + _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + assert.Contains(t, err.Error(), `API error: bad login/password`) + log.Printf("err-> %s", err) +} + +func TestNewDefaultClient(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewDefaultClient(apiURL, "/v1", "", nil) + if err != nil { + t.Fatalf("new api client: %s", err) + } + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + }) + _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + assert.Contains(t, err.Error(), `performing request: API error: brr`) + log.Printf("err-> %s", err) +} + +func TestNewClientRegisterKO(t *testing.T) { + apiURL, err := url.Parse("http://127.0.0.1:4242/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + _, err = RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + if runtime.GOOS != "windows" { + assert.Contains(t, fmt.Sprintf("%s", err), "dial tcp 127.0.0.1:4242: connect: connection 
refused") + } else { + assert.Contains(t, fmt.Sprintf("%s", err), " No connection could be made because the target machine actively refused it.") + } +} + +func TestNewClientRegisterOK(t *testing.T) { + log.SetLevel(log.TraceLevel) + mux, urlx, teardown := setup() + defer teardown() + + /*mock login*/ + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + if err != nil { + t.Fatalf("while registering client : %s", err) + } + log.Printf("->%T", client) +} + +func TestNewClientBadAnswer(t *testing.T) { + log.SetLevel(log.TraceLevel) + mux, urlx, teardown := setup() + defer teardown() + + /*mock login*/ + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`bad`)) + }) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + _, err = RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + assert.Contains(t, fmt.Sprintf("%s", err), `invalid body: invalid character 'b' looking for beginning of value`) +} diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go new file mode 100644 index 0000000..b87a708 --- /dev/null +++ b/pkg/apiclient/config.go @@ -0,0 +1,17 @@ +package apiclient + +import ( + "net/url" + + "github.com/go-openapi/strfmt" +) + +type Config struct { + MachineID string + Password strfmt.Password + Scenarios []string + URL *url.URL + VersionPrefix string + UserAgent string + UpdateScenario func() ([]string, error) +} diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go new file mode 100644 index 0000000..e2b32e8 --- /dev/null +++ b/pkg/apiclient/decisions_service.go @@ -0,0 +1,141 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" + qs "github.com/google/go-querystring/query" +) + +type DecisionsService service + +type DecisionsListOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + TypeEquals *string `url:"type,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + Contains *bool `url:"contains,omitempty"` + ListOpts +} + +type DecisionsStreamOpts struct { + Startup bool `url:"startup,omitempty"` + Scopes string `url:"scopes,omitempty"` + ScenariosContaining string `url:"scenarios_containing,omitempty"` + ScenariosNotContaining string `url:"scenarios_not_containing,omitempty"` + Origins string `url:"origins,omitempty"` +} + +func (o *DecisionsStreamOpts) addQueryParamsToURL(url string) (string, error) { + params, err := qs.Values(o) + if err != nil { + return "", err + } + return fmt.Sprintf("%s?%s", url, params.Encode()), nil +} + +type DecisionsDeleteOpts struct { + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + TypeEquals *string 
`url:"type,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + Contains *bool `url:"contains,omitempty"` + // + ScenarioEquals *string `url:"scenario,omitempty"` + ListOpts +} + +// to demo query arguments +func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*models.GetDecisionsResponse, *Response, error) { + var decisions models.GetDecisionsResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &decisions) + if err != nil { + return nil, resp, err + } + return &decisions, resp, nil +} + +func (s *DecisionsService) GetStream(ctx context.Context, opts DecisionsStreamOpts) (*models.DecisionsStreamResponse, *Response, error) { + var decisions models.DecisionsStreamResponse + u, err := opts.addQueryParamsToURL(s.client.URLPrefix + "/decisions/stream") + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &decisions) + if err != nil { + return nil, resp, err + } + + return &decisions, resp, nil +} + +func (s *DecisionsService) StopStream(ctx context.Context) (*Response, error) { + + u := fmt.Sprintf("%s/decisions", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) (*models.DeleteDecisionResponse, *Response, error) { + var deleteDecisionResponse models.DeleteDecisionResponse + params, err := qs.Values(opts) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &deleteDecisionResponse) + if err != nil { + return nil, resp, err + } + return &deleteDecisionResponse, resp, nil +} + +func (s *DecisionsService) DeleteOne(ctx context.Context, decision_id string) (*models.DeleteDecisionResponse, *Response, error) { + var deleteDecisionResponse models.DeleteDecisionResponse + u := fmt.Sprintf("%s/decisions/%s", s.client.URLPrefix, decision_id) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &deleteDecisionResponse) + if err != nil { + return nil, resp, err + } + return &deleteDecisionResponse, resp, nil +} diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go new file mode 100644 index 0000000..8ea5e39 --- /dev/null +++ b/pkg/apiclient/decisions_service_test.go @@ -0,0 +1,341 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDecisionsList(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + + mux.HandleFunc("/decisions", 
func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + if r.URL.RawQuery == "ip=1.2.3.4" { + assert.Equal(t, r.URL.RawQuery, "ip=1.2.3.4") + assert.Equal(t, r.Header.Get("X-Api-Key"), "ixu") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + } else { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`null`)) + //no results + } + }) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + //ok answer + auth := &APIKeyTransport{ + APIKey: "ixu", + } + + newcli, err := NewDefaultClient(apiURL, "v1", "toto", auth.Client()) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + tduration := "3h59m55.756182786s" + torigin := "cscli" + tscenario := "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'" + tscope := "Ip" + ttype := "ban" + tvalue := "1.2.3.4" + expected := &models.GetDecisionsResponse{ + &models.Decision{ + Duration: &tduration, + ID: 4, + Origin: &torigin, + Scenario: &tscenario, + Scope: &tscope, + Type: &ttype, + Value: &tvalue, + }, + } + + //OK decisions + decisionsFilter := DecisionsListOpts{IPEquals: new(string)} + *decisionsFilter.IPEquals = "1.2.3.4" + decisions, resp, err := newcli.Decisions.List(context.Background(), decisionsFilter) + + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + + if err != nil { + t.Fatalf("new api client: %s", err) + } + if !reflect.DeepEqual(*decisions, *expected) { + t.Fatalf("returned %+v, want %+v", resp, expected) + } + + //Empty return + decisionsFilter = DecisionsListOpts{IPEquals: new(string)} + *decisionsFilter.IPEquals = "1.2.3.5" + decisions, resp, err = newcli.Decisions.List(context.Background(), decisionsFilter) + require.NoError(t, err) + + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + assert.Equal(t, len(*decisions), 0) + +} + +func TestDecisionsStream(t *testing.T) { + log.SetLevel(log.DebugLevel) + + mux, urlx, teardown := setup() + defer teardown() + + mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { + + assert.Equal(t, r.Header.Get("X-Api-Key"), "ixu") + testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { + if r.URL.RawQuery == "startup=true" { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + } else { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"deleted":null,"new":null}`)) + } + } + }) + mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, r.Header.Get("X-Api-Key"), "ixu") + testMethod(t, r, http.MethodDelete) + if r.Method == http.MethodDelete { + w.WriteHeader(http.StatusOK) + } + }) + + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + //ok answer + auth := &APIKeyTransport{ + APIKey: "ixu", + } + + newcli, err := NewDefaultClient(apiURL, "v1", "toto", auth.Client()) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + tduration := "3h59m55.756182786s" + torigin := "cscli" + 
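+ // these tXXX locals exist only because the model fields are pointers;
+ // a hypothetical generic helper (Go 1.18+, not defined in this package)
+ // would shorten the setup of expected values, e.g.:
+ //
+ //    func ptr[T any](v T) *T { return &v }
+ //    expected := &models.Decision{Origin: ptr("cscli"), Type: ptr("ban"), ...}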
tscenario := "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'" + tscope := "Ip" + ttype := "ban" + tvalue := "1.2.3.4" + expected := &models.DecisionsStreamResponse{ + New: models.GetDecisionsResponse{ + &models.Decision{ + Duration: &tduration, + ID: 4, + Origin: &torigin, + Scenario: &tscenario, + Scope: &tscope, + Type: &ttype, + Value: &tvalue, + }, + }, + } + + decisions, resp, err := newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: true}) + require.NoError(t, err) + + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + + if err != nil { + t.Fatalf("new api client: %s", err) + } + if !reflect.DeepEqual(*decisions, *expected) { + t.Fatalf("returned %+v, want %+v", resp, expected) + } + + //and second call, we get empty lists + decisions, resp, err = newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: false}) + require.NoError(t, err) + + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } + assert.Equal(t, 0, len(decisions.New)) + assert.Equal(t, 0, len(decisions.Deleted)) + + //delete stream + resp, err = newcli.Decisions.StopStream(context.Background()) + require.NoError(t, err) + + if resp.Response.StatusCode != http.StatusOK { + t.Errorf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusOK) + } +} + +func TestDeleteDecisions(t *testing.T) { + mux, urlx, teardown := setup() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "DELETE") + assert.Equal(t, r.URL.RawQuery, "ip=1.2.3.4") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"nbDeleted":"1"}`)) + //w.Write([]byte(`{"message":"0 deleted alerts"}`)) + }) + log.Printf("URL is %s", urlx) + apiURL, err := url.Parse(urlx + "/") + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v1", + }) + + if err != nil { + t.Fatalf("new api client: %s", err) + } + + filters := DecisionsDeleteOpts{IPEquals: new(string)} + *filters.IPEquals = "1.2.3.4" + deleted, _, err := client.Decisions.Delete(context.Background(), filters) + if err != nil { + t.Fatalf("unexpected err : %s", err) + } + assert.Equal(t, "1", deleted.NbDeleted) + + defer teardown() +} + +func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { + baseURLString := "http://localhost:8080/v1/decisions/stream" + type fields struct { + Startup bool + Scopes string + ScenariosContaining string + ScenariosNotContaining string + } + tests := []struct { + name string + fields fields + want string + wantErr bool + }{ + { + name: "no filter", + want: baseURLString + "?", + }, + { + name: "startup=true", + fields: fields{ + Startup: true, + }, + want: baseURLString + "?startup=true", + }, + { + name: "set all params", + fields: fields{ + Startup: true, + Scopes: "ip,range", + ScenariosContaining: "ssh", + ScenariosNotContaining: "bf", + }, + want: baseURLString + 
"?scenarios_containing=ssh&scenarios_not_containing=bf&scopes=ip%2Crange&startup=true", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + o := &DecisionsStreamOpts{ + Startup: tt.fields.Startup, + Scopes: tt.fields.Scopes, + ScenariosContaining: tt.fields.ScenariosContaining, + ScenariosNotContaining: tt.fields.ScenariosNotContaining, + } + got, err := o.addQueryParamsToURL(baseURLString) + if (err != nil) != tt.wantErr { + t.Errorf("DecisionsStreamOpts.addQueryParamsToURL() error = %v, wantErr %v", err, tt.wantErr) + return + } + + gotURL, err := url.Parse(got) + if err != nil { + t.Errorf("DecisionsStreamOpts.addQueryParamsToURL() got error while parsing URL: %s", err) + } + + expectedURL, err := url.Parse(tt.want) + if err != nil { + t.Errorf("DecisionsStreamOpts.addQueryParamsToURL() got error while parsing URL: %s", err) + } + + if *gotURL != *expectedURL { + t.Errorf("DecisionsStreamOpts.addQueryParamsToURL() = %v, want %v", *gotURL, *expectedURL) + } + }) + } +} + +// func TestDeleteOneDecision(t *testing.T) { +// mux, urlx, teardown := setup() +// mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { +// w.WriteHeader(http.StatusOK) +// w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) +// }) +// mux.HandleFunc("/decisions/1", func(w http.ResponseWriter, r *http.Request) { +// testMethod(t, r, "DELETE") +// w.WriteHeader(http.StatusOK) +// w.Write([]byte(`{"nbDeleted":"1"}`)) +// }) +// log.Printf("URL is %s", urlx) +// apiURL, err := url.Parse(urlx + "/") +// if err != nil { +// t.Fatalf("parsing api url: %s", apiURL) +// } +// client, err := NewClient(&Config{ +// MachineID: "test_login", +// Password: "test_password", +// UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), +// URL: apiURL, +// VersionPrefix: "v1", +// }) + +// if err != nil { +// t.Fatalf("new api client: %s", err.Error()) +// } + +// filters := DecisionsDeleteOpts{IPEquals: new(string)} +// *filters.IPEquals = "1.2.3.4" +// deleted, _, err := client.Decisions.Delete(context.Background(), filters) +// if err != nil { +// t.Fatalf("unexpected err : %s", err) +// } +// assert.Equal(t, "1", deleted.NbDeleted) + +// defer teardown() +// } diff --git a/pkg/apiclient/heartbeat.go b/pkg/apiclient/heartbeat.go new file mode 100644 index 0000000..dc7c256 --- /dev/null +++ b/pkg/apiclient/heartbeat.go @@ -0,0 +1,62 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + tomb "gopkg.in/tomb.v2" +) + +type HeartBeatService service + +func (h *HeartBeatService) Ping(ctx context.Context) (bool, *Response, error) { + + u := fmt.Sprintf("%s/heartbeat", h.client.URLPrefix) + + req, err := h.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return false, nil, err + } + + resp, err := h.client.Do(ctx, req, nil) + if err != nil { + return false, resp, err + } + + return true, resp, nil +} + +func (h *HeartBeatService) StartHeartBeat(ctx context.Context, t *tomb.Tomb) { + t.Go(func() error { + defer types.CatchPanic("crowdsec/apiClient/heartbeat") + hbTimer := time.NewTicker(1 * time.Minute) + for { + select { + case <-hbTimer.C: + log.Debug("heartbeat: sending heartbeat") + ok, resp, err := h.Ping(ctx) + if err != nil { + log.Errorf("heartbeat error : %s", err) + continue + } + resp.Response.Body.Close() + if resp.Response.StatusCode != http.StatusOK { + log.Errorf("heartbeat unexpected return code : %d", 
resp.Response.StatusCode) + continue + } + if !ok { + log.Errorf("heartbeat returned false") + continue + } + case <-t.Dying(): + log.Debugf("heartbeat: stopping") + hbTimer.Stop() + return nil + } + } + }) +} diff --git a/pkg/apiclient/metrics.go b/pkg/apiclient/metrics.go new file mode 100644 index 0000000..ea44728 --- /dev/null +++ b/pkg/apiclient/metrics.go @@ -0,0 +1,27 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type MetricsService service + +func (s *MetricsService) Add(ctx context.Context, metrics *models.Metrics) (interface{}, *Response, error) { + var response interface{} + + u := fmt.Sprintf("%s/metrics/", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + if err != nil { + return nil, nil, err + } + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, err + } + return &response, resp, nil +} diff --git a/pkg/apiclient/signal.go b/pkg/apiclient/signal.go new file mode 100644 index 0000000..27d2e36 --- /dev/null +++ b/pkg/apiclient/signal.go @@ -0,0 +1,35 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" +) + +type SignalService service + +func (s *SignalService) Add(ctx context.Context, signals *models.AddSignalsRequest) (interface{}, *Response, error) { + var response interface{} + + u := fmt.Sprintf("%s/signals", s.client.URLPrefix) + req, err := s.client.NewRequest(http.MethodPost, u, &signals) + if err != nil { + return nil, nil, errors.Wrap(err, "while building request") + } + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, errors.Wrap(err, "while performing request") + } + if resp.Response.StatusCode != http.StatusOK { + log.Warnf("Signal push response : http %s", resp.Response.Status) + } else { + log.Debugf("Signal push response : http %s", resp.Response.Status) + } + return &response, resp, nil +} diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go new file mode 100644 index 0000000..9b6fc04 --- /dev/null +++ b/pkg/apiserver/alerts_test.go @@ -0,0 +1,510 @@ +package apiserver + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/gin-gonic/gin" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +type LAPI struct { + router *gin.Engine + loginResp models.WatcherAuthResponse + bouncerKey string + t *testing.T + DBConfig *csconfig.DatabaseCfg +} + +func SetupLAPITest(t *testing.T) LAPI { + t.Helper() + router, loginResp, config, err := InitMachineTest() + if err != nil { + t.Fatal(err) + } + + APIKey, err := CreateTestBouncer(config.API.Server.DbConfig) + if err != nil { + t.Fatal(err) + } + + return LAPI{ + router: router, + loginResp: loginResp, + bouncerKey: APIKey, + DBConfig: config.API.Server.DbConfig, + } +} + +func (l *LAPI) InsertAlertFromFile(path string) *httptest.ResponseRecorder { + alertReader := GetAlertReaderFromFile(path) + return l.RecordResponse(http.MethodPost, "/v1/alerts", alertReader, "password") +} + +func (l *LAPI) RecordResponse(verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { + w := httptest.NewRecorder() + req, err := 
http.NewRequest(verb, url, body) + if err != nil { + l.t.Fatal(err) + } + if authType == "apikey" { + req.Header.Add("X-Api-Key", l.bouncerKey) + } else if authType == "password" { + AddAuthHeaders(req, l.loginResp) + } else { + l.t.Fatal("auth type not supported") + } + l.router.ServeHTTP(w, req) + return w +} + +func InitMachineTest() (*gin.Engine, models.WatcherAuthResponse, csconfig.Config, error) { + router, config, err := NewAPITest() + if err != nil { + return nil, models.WatcherAuthResponse{}, config, fmt.Errorf("unable to run local API: %s", err) + } + + loginResp, err := LoginToTestAPI(router, config) + if err != nil { + return nil, models.WatcherAuthResponse{}, config, fmt.Errorf("%s", err) + } + return router, loginResp, config, nil +} + +func LoginToTestAPI(router *gin.Engine, config csconfig.Config) (models.WatcherAuthResponse, error) { + body, err := CreateTestMachine(router) + if err != nil { + return models.WatcherAuthResponse{}, fmt.Errorf("%s", err) + } + err = ValidateMachine("test", config.API.Server.DbConfig) + if err != nil { + log.Fatalln(err) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + loginResp := models.WatcherAuthResponse{} + err = json.NewDecoder(w.Body).Decode(&loginResp) + if err != nil { + return models.WatcherAuthResponse{}, fmt.Errorf("%s", err) + } + + return loginResp, nil +} + +func AddAuthHeaders(request *http.Request, authResponse models.WatcherAuthResponse) { + request.Header.Add("User-Agent", UserAgent) + request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", authResponse.Token)) +} + +func TestSimulatedAlert(t *testing.T) { + lapi := SetupLAPITest(t) + lapi.InsertAlertFromFile("./tests/alert_minibulk+simul.json") + alertContent := GetAlertReaderFromFile("./tests/alert_minibulk+simul.json") + //exclude decision in simulation mode + + w := lapi.RecordResponse("GET", "/v1/alerts?simulated=false", alertContent, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) + assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) + //include decision in simulation mode + + w = lapi.RecordResponse("GET", "/v1/alerts?simulated=true", alertContent, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) + assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) +} + +func TestCreateAlert(t *testing.T) { + lapi := SetupLAPITest(t) + // Create Alert with invalid format + + w := lapi.RecordResponse(http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") + assert.Equal(t, 400, w.Code) + assert.Equal(t, "{\"message\":\"invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Create Alert with invalid input + alertContent := GetAlertReaderFromFile("./tests/invalidAlert_sample.json") + + w = lapi.RecordResponse(http.MethodPost, "/v1/alerts", alertContent, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"validation failure list:\\n0.scenario in body is required\\n0.scenario_hash in body is required\\n0.scenario_version in body is required\\n0.simulated in body is required\\n0.source in body is required\"}", w.Body.String()) + 
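+
+ // RecordResponse (defined above) picks the auth scheme from its last
+ // argument: "password" sends "Authorization: Bearer <token>" via
+ // AddAuthHeaders, "apikey" sends "X-Api-Key: <bouncer key>"; e.g.:
+ //
+ //    lapi.RecordResponse(http.MethodGet, "/v1/decisions", emptyBody, "apikey")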
+ // Create Valid Alert + w = lapi.InsertAlertFromFile("./tests/alert_sample.json") + assert.Equal(t, 201, w.Code) + assert.Equal(t, "[\"1\"]", w.Body.String()) +} + +func TestCreateAlertChannels(t *testing.T) { + + apiServer, config, err := NewAPIServer() + if err != nil { + log.Fatalln(err) + } + apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) + apiServer.InitController() + + loginResp, err := LoginToTestAPI(apiServer.router, config) + if err != nil { + log.Fatalln(err) + } + lapi := LAPI{router: apiServer.router, loginResp: loginResp} + + var pd csplugin.ProfileAlert + var wg sync.WaitGroup + + wg.Add(1) + go func() { + pd = <-apiServer.controller.PluginChannel + wg.Done() + }() + + go lapi.InsertAlertFromFile("./tests/alert_ssh-bf.json") + wg.Wait() + assert.Equal(t, len(pd.Alert.Decisions), 1) + apiServer.Close() +} + +func TestAlertListFilters(t *testing.T) { + lapi := SetupLAPITest(t) + lapi.InsertAlertFromFile("./tests/alert_ssh-bf.json") + alertContent := GetAlertReaderFromFile("./tests/alert_ssh-bf.json") + + //bad filter + + w := lapi.RecordResponse("GET", "/v1/alerts?test=test", alertContent, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"Filter parameter 'test' is unknown (=test): invalid filter\"}", w.Body.String()) + + //get without filters + + w = lapi.RecordResponse("GET", "/v1/alerts", emptyBody, "password") + assert.Equal(t, 200, w.Code) + //check alert and decision + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test decision_type filter (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?decision_type=ban", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test decision_type filter (bad value) + + w = lapi.RecordResponse("GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test scope (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?scope=Ip", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test scope (bad value) + + w = lapi.RecordResponse("GET", "/v1/alerts?scope=rarara", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test scenario (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test scenario (bad value) + + w = lapi.RecordResponse("GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test ip (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), 
"Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test ip (bad value) + + w = lapi.RecordResponse("GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test ip (invalid value) + + w = lapi.RecordResponse("GET", "/v1/alerts?ip=gruueq", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + + //test range (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test range + + w = lapi.RecordResponse("GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test range (invalid value) + + w = lapi.RecordResponse("GET", "/v1/alerts?range=ratata", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + + //test since (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?since=1h", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test since (ok but yields no results) + + w = lapi.RecordResponse("GET", "/v1/alerts?since=1ns", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test since (invalid value) + + w = lapi.RecordResponse("GET", "/v1/alerts?since=1zuzu", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) + + //test until (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?until=1ns", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test until (ok but no return) + + w = lapi.RecordResponse("GET", "/v1/alerts?until=1m", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test until (invalid value) + + w = lapi.RecordResponse("GET", "/v1/alerts?until=1zuzu", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) + + //test simulated (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?simulated=true", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test simulated (ok) + + w = lapi.RecordResponse("GET", "/v1/alerts?simulated=false", emptyBody, "password") + 
assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test has active decision + + w = lapi.RecordResponse("GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") + assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) + + //test has active decision + + w = lapi.RecordResponse("GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + + //test has active decision (invalid value) + + w = lapi.RecordResponse("GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) + +} + +func TestAlertBulkInsert(t *testing.T) { + lapi := SetupLAPITest(t) + //insert a bulk of 20 alerts to trigger bulk insert + lapi.InsertAlertFromFile("./tests/alert_bulk.json") + alertContent := GetAlertReaderFromFile("./tests/alert_bulk.json") + + w := lapi.RecordResponse("GET", "/v1/alerts", alertContent, "password") + assert.Equal(t, 200, w.Code) +} + +func TestListAlert(t *testing.T) { + lapi := SetupLAPITest(t) + lapi.InsertAlertFromFile("./tests/alert_sample.json") + // List Alert with invalid filter + + w := lapi.RecordResponse("GET", "/v1/alerts?test=test", emptyBody, "password") + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"Filter parameter 'test' is unknown (=test): invalid filter\"}", w.Body.String()) + + // List Alert + + w = lapi.RecordResponse("GET", "/v1/alerts", emptyBody, "password") + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "crowdsecurity/test") +} + +func TestCreateAlertErrors(t *testing.T) { + lapi := SetupLAPITest(t) + alertContent := GetAlertReaderFromFile("./tests/alert_sample.json") + + //test invalid bearer + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 401, w.Code) + + //test invalid bearer + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 401, w.Code) + +} + +func TestDeleteAlert(t *testing.T) { + lapi := SetupLAPITest(t) + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + // Fail Delete Alert + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + AddAuthHeaders(req, lapi.loginResp) + req.RemoteAddr = "127.0.0.2:4242" + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 403, w.Code) + assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + + // Delete Alert + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + AddAuthHeaders(req, lapi.loginResp) + req.RemoteAddr = 
"127.0.0.1:4242" + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) +} + +func TestDeleteAlertByID(t *testing.T) { + lapi := SetupLAPITest(t) + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + // Fail Delete Alert + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + AddAuthHeaders(req, lapi.loginResp) + req.RemoteAddr = "127.0.0.2:4242" + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 403, w.Code) + assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + + // Delete Alert + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + AddAuthHeaders(req, lapi.loginResp) + req.RemoteAddr = "127.0.0.1:4242" + lapi.router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) +} + +func TestDeleteAlertTrustedIPS(t *testing.T) { + cfg := LoadTestConfig() + // IPv6 mocking doesn't seem to work. + // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} + cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24"} + cfg.API.Server.ListenURI = "::8080" + server, err := NewServer(cfg.API.Server) + if err != nil { + log.Fatal(err) + } + err = server.InitController() + if err != nil { + log.Fatal(err) + } + router, err := server.Router() + if err != nil { + log.Fatal(err) + } + loginResp, err := LoginToTestAPI(router, cfg) + if err != nil { + log.Fatal(err) + } + lapi := LAPI{ + router: router, + loginResp: loginResp, + t: t, + } + + assertAlertDeleteFailedFromIP := func(ip string) { + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + + AddAuthHeaders(req, loginResp) + req.RemoteAddr = ip + ":1234" + + router.ServeHTTP(w, req) + assert.Equal(t, 403, w.Code) + assert.Contains(t, w.Body.String(), fmt.Sprintf(`{"message":"access forbidden from this IP (%s)"}`, ip)) + } + + assertAlertDeletedFromIP := func(ip string) { + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + AddAuthHeaders(req, loginResp) + req.RemoteAddr = ip + ":1234" + + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + } + + lapi.InsertAlertFromFile("./tests/alert_sample.json") + assertAlertDeleteFailedFromIP("4.3.2.1") + assertAlertDeletedFromIP("1.2.3.4") + + lapi.InsertAlertFromFile("./tests/alert_sample.json") + assertAlertDeletedFromIP("1.2.4.0") + lapi.InsertAlertFromFile("./tests/alert_sample.json") + assertAlertDeletedFromIP("1.2.4.1") + lapi.InsertAlertFromFile("./tests/alert_sample.json") + assertAlertDeletedFromIP("1.2.4.255") + + lapi.InsertAlertFromFile("./tests/alert_sample.json") + assertAlertDeletedFromIP("127.0.0.1") + +} diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go new file mode 100644 index 0000000..d1db144 --- /dev/null +++ b/pkg/apiserver/api_key_test.go @@ -0,0 +1,52 @@ +package apiserver + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAPIKey(t *testing.T) { + router, config, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + APIKey, err := CreateTestBouncer(config.API.Server.DbConfig) + if err != nil { + log.Fatal(err) + } + // Login with empty 
token + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"access forbidden\"}", w.Body.String()) + + // Login with invalid token + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"access forbidden\"}", w.Body.String()) + + // Login with valid token + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "null", w.Body.String()) + +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go new file mode 100644 index 0000000..db33aca --- /dev/null +++ b/pkg/apiserver/apic.go @@ -0,0 +1,632 @@ +package apiserver + +import ( + "context" + "fmt" + "math/rand" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var ( + pullIntervalDefault = time.Hour * 2 + pullIntervalDelta = 5 * time.Minute + pushIntervalDefault = time.Second * 30 + pushIntervalDelta = time.Second * 15 + metricsIntervalDefault = time.Minute * 30 + metricsIntervalDelta = time.Minute * 15 +) + +var SCOPE_CAPI string = "CAPI" +var SCOPE_CAPI_ALIAS string = "crowdsecurity/community-blocklist" //we don't use "CAPI" directly, to make it less confusing for the user +var SCOPE_LISTS string = "lists" + +type apic struct { + // when changing the intervals in tests, always set *First too + // or they can be negative + pullInterval time.Duration + pullIntervalFirst time.Duration + pushInterval time.Duration + pushIntervalFirst time.Duration + metricsInterval time.Duration + metricsIntervalFirst time.Duration + dbClient *database.Client + apiClient *apiclient.ApiClient + alertToPush chan []*models.Alert + mu sync.Mutex + pushTomb tomb.Tomb + pullTomb tomb.Tomb + metricsTomb tomb.Tomb + startup bool + credentials *csconfig.ApiCredentialsCfg + scenarioList []string + consoleConfig *csconfig.ConsoleConfig +} + +// randomDuration returns a duration value between d-delta and d+delta +func randomDuration(d time.Duration, delta time.Duration) time.Duration { + return time.Duration(float64(d) + float64(delta)*(-1.0+2.0*rand.Float64())) +} + +func (a *apic) FetchScenariosListFromDB() ([]string, error) { + scenarios := make([]string, 0) + machines, err := a.dbClient.ListMachines() + if err != nil { + return nil, errors.Wrap(err, "while listing machines") + } + //merge all scenarios together + for _, v := range machines { + machineScenarios := strings.Split(v.Scenarios, ",") + log.Debugf("%d scenarios for machine %d", len(machineScenarios), v.ID) + for _, sv := 
range machineScenarios { + if !types.InSlice(sv, scenarios) && sv != "" { + scenarios = append(scenarios, sv) + } + } + } + log.Debugf("Returning list of scenarios : %+v", scenarios) + return scenarios, nil +} + +func alertToSignal(alert *models.Alert, scenarioTrust string) *models.AddSignalsRequestItem { + return &models.AddSignalsRequestItem{ + Message: alert.Message, + Scenario: alert.Scenario, + ScenarioHash: alert.ScenarioHash, + ScenarioVersion: alert.ScenarioVersion, + Source: alert.Source, + StartAt: alert.StartAt, + StopAt: alert.StopAt, + CreatedAt: alert.CreatedAt, + MachineID: alert.MachineID, + ScenarioTrust: &scenarioTrust, + } +} + +func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig) (*apic, error) { + var err error + ret := &apic{ + alertToPush: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, + credentials: config.Credentials, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + consoleConfig: consoleConfig, + pullInterval: pullIntervalDefault, + pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), + pushInterval: pushIntervalDefault, + pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), + metricsInterval: metricsIntervalDefault, + metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), + } + + password := strfmt.Password(config.Credentials.Password) + apiURL, err := url.Parse(config.Credentials.URL) + if err != nil { + return nil, errors.Wrapf(err, "while parsing '%s'", config.Credentials.URL) + } + ret.scenarioList, err = ret.FetchScenariosListFromDB() + if err != nil { + return nil, errors.Wrap(err, "while fetching scenarios from db") + } + ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ + MachineID: config.Credentials.Login, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + URL: apiURL, + VersionPrefix: "v2", + Scenarios: ret.scenarioList, + UpdateScenario: ret.FetchScenariosListFromDB, + }) + return ret, err +} + +// keep track of all alerts in cache and push it to CAPI every PushInterval. +func (a *apic) Push() error { + defer types.CatchPanic("lapi/pushToAPIC") + + var cache models.AddSignalsRequest + ticker := time.NewTicker(a.pushIntervalFirst) + + log.Infof("Start push to CrowdSec Central API (interval: %s once, then %s)", a.pushIntervalFirst.Round(time.Second), a.pushInterval) + + for { + select { + case <-a.pushTomb.Dying(): // if one apic routine is dying, do we kill the others? + a.pullTomb.Kill(nil) + a.metricsTomb.Kill(nil) + log.Infof("push tomb is dying, sending cache (%d elements) before exiting", len(cache)) + if len(cache) == 0 { + return nil + } + go a.Send(&cache) + return nil + case <-ticker.C: + ticker.Reset(a.pushInterval) + if len(cache) > 0 { + a.mu.Lock() + cacheCopy := cache + cache = make(models.AddSignalsRequest, 0) + a.mu.Unlock() + log.Infof("Signal push: %d signals to push", len(cacheCopy)) + go a.Send(&cacheCopy) + } + case alerts := <-a.alertToPush: + var signals []*models.AddSignalsRequestItem + for _, alert := range alerts { + if ok := shouldShareAlert(alert, a.consoleConfig); ok { + signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert))) + } + } + a.mu.Lock() + cache = append(cache, signals...) 
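+ // signals accumulate here, under the lock, until the next ticker tick
+ // flushes them; with the defaults above the first flush happens after
+ // randomDuration(30s, 15s), i.e. somewhere in [15s, 45s], and then
+ // every pushInterval (30s)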
+ a.mu.Unlock() + } + } +} + +func getScenarioTrustOfAlert(alert *models.Alert) string { + scenarioTrust := "certified" + if alert.ScenarioHash == nil || *alert.ScenarioHash == "" { + scenarioTrust = "custom" + } else if alert.ScenarioVersion == nil || *alert.ScenarioVersion == "" || *alert.ScenarioVersion == "?" { + scenarioTrust = "tainted" + } + if len(alert.Decisions) > 0 { + if *alert.Decisions[0].Origin == "cscli" { + scenarioTrust = "manual" + } + } + return scenarioTrust +} + +func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig) bool { + if *alert.Simulated { + log.Debugf("simulation enabled for alert (id:%d), will not be sent to CAPI", alert.ID) + return false + } + switch scenarioTrust := getScenarioTrustOfAlert(alert); scenarioTrust { + case "manual": + if !*consoleConfig.ShareManualDecisions { + log.Debugf("manual decision generated an alert, not sending it to CAPI because the option is disabled") + return false + } + case "tainted": + if !*consoleConfig.ShareTaintedScenarios { + log.Debugf("tainted scenario generated an alert, not sending it to CAPI because the option is disabled") + return false + } + case "custom": + if !*consoleConfig.ShareCustomScenarios { + log.Debugf("custom scenario generated an alert, not sending it to CAPI because the option is disabled") + return false + } + } + return true +} + +func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { + /*we do have a problem here: + The apic.Push background routine reads from the alertToPush chan. + This chan is filled by Controller.CreateAlert. + + If apic.Send hangs, the alertToPush chan will fill up, which means that Controller.CreateAlert will hang too, blocking API worker(s). + + So instead, we prefer to cancel the write. + + I don't know enough about gin to tell how much of an issue this can be.
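+
+ An illustrative alternative (hypothetical, not what this code does) would
+ be a non-blocking send on the producer side, dropping signals when the
+ channel is full instead of cancelling the write:
+
+    select {
+    case a.alertToPush <- alerts:
+    default:
+        log.Warning("apic: push channel full, dropping signals")
+    }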
+ */ + var cache []*models.AddSignalsRequestItem = *cacheOrig + var send models.AddSignalsRequest + + bulkSize := 50 + pageStart := 0 + pageEnd := bulkSize + + for { + + if pageEnd >= len(cache) { + send = cache[pageStart:] + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, _, err := a.apiClient.Signal.Add(ctx, &send) + if err != nil { + log.Errorf("Error while sending final chunk to central API : %s", err) + return + } + break + } + send = cache[pageStart:pageEnd] + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, _, err := a.apiClient.Signal.Add(ctx, &send) + if err != nil { + //we log it here as well, because the return value of func might be discarded + log.Errorf("Error while sending chunk to central API : %s", err) + } + pageStart += bulkSize + pageEnd += bulkSize + } +} + +func (a *apic) CAPIPullIsOld() (bool, error) { + /*only pull community blocklist if it's older than 1h30 */ + alerts := a.dbClient.Ent.Alert.Query() + alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) + alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert + count, err := alerts.Count(a.dbClient.CTX) + if err != nil { + return false, errors.Wrap(err, "while looking for CAPI alert") + } + if count > 0 { + log.Printf("last CAPI pull is newer than 1h30, skip.") + return false, nil + } + return true, nil +} + +func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delete_counters map[string]map[string]int) (int, error) { + var filter map[string][]string + var nbDeleted int + for _, decision := range deletedDecisions { + if strings.ToLower(*decision.Scope) == "ip" { + filter = make(map[string][]string, 1) + filter["value"] = []string{*decision.Value} + } else { + filter = make(map[string][]string, 3) + filter["value"] = []string{*decision.Value} + filter["type"] = []string{*decision.Type} + filter["scopes"] = []string{*decision.Scope} + } + filter["origin"] = []string{*decision.Origin} + + dbCliRet, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter) + if err != nil { + return 0, errors.Wrap(err, "deleting decisions error") + } + dbCliDel, err := strconv.Atoi(dbCliRet) + if err != nil { + return 0, errors.Wrapf(err, "converting db ret %d", dbCliDel) + } + updateCounterForDecision(delete_counters, decision, dbCliDel) + nbDeleted += dbCliDel + } + return nbDeleted, nil + +} + +func createAlertsForDecisions(decisions []*models.Decision) []*models.Alert { + newAlerts := make([]*models.Alert, 0) + for _, decision := range decisions { + found := false + for _, sub := range newAlerts { + if sub.Source.Scope == nil { + log.Warningf("nil scope in %+v", sub) + continue + } + if *decision.Origin == SCOPE_CAPI { + if *sub.Source.Scope == SCOPE_CAPI { + found = true + break + } + } else if *decision.Origin == SCOPE_LISTS { + if *sub.Source.Scope == *decision.Origin { + if sub.Scenario == nil { + log.Warningf("nil scenario in %+v", sub) + } + if *sub.Scenario == *decision.Scenario { + found = true + break + } + } + } else { + log.Warningf("unknown origin %s : %+v", *decision.Origin, decision) + } + } + if !found { + log.Debugf("Create entry for origin:%s scenario:%s", *decision.Origin, *decision.Scenario) + newAlerts = append(newAlerts, createAlertForDecision(decision)) + } + } + return newAlerts +} + +func createAlertForDecision(decision *models.Decision) *models.Alert { + newAlert := &models.Alert{} + 
newAlert.Source = &models.Source{} + newAlert.Source.Scope = types.StrPtr("") + if *decision.Origin == SCOPE_CAPI { //to make things more user friendly, we replace CAPI with community-blocklist + newAlert.Scenario = types.StrPtr(SCOPE_CAPI) + newAlert.Source.Scope = types.StrPtr(SCOPE_CAPI) + } else if *decision.Origin == SCOPE_LISTS { + newAlert.Scenario = types.StrPtr(*decision.Scenario) + newAlert.Source.Scope = types.StrPtr(SCOPE_LISTS) + } else { + log.Warningf("unknown origin %s", *decision.Origin) + } + newAlert.Message = types.StrPtr("") + newAlert.Source.Value = types.StrPtr("") + newAlert.StartAt = types.StrPtr(time.Now().UTC().Format(time.RFC3339)) + newAlert.StopAt = types.StrPtr(time.Now().UTC().Format(time.RFC3339)) + newAlert.Capacity = types.Int32Ptr(0) + newAlert.Simulated = types.BoolPtr(false) + newAlert.EventsCount = types.Int32Ptr(0) + newAlert.Leakspeed = types.StrPtr("") + newAlert.ScenarioHash = types.StrPtr("") + newAlert.ScenarioVersion = types.StrPtr("") + newAlert.MachineID = database.CapiMachineID + return newAlert +} + +// This function takes in list of parent alerts and decisions and then pairs them up. +func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decision, add_counters map[string]map[string]int) []*models.Alert { + for _, decision := range decisions { + //count and create separate alerts for each list + updateCounterForDecision(add_counters, decision, 1) + + /*CAPI might send lower case scopes, unify it.*/ + switch strings.ToLower(*decision.Scope) { + case "ip": + *decision.Scope = types.Ip + case "range": + *decision.Scope = types.Range + } + found := false + //add the individual decisions to the right list + for idx, alert := range alerts { + if *decision.Origin == SCOPE_CAPI { + if *alert.Source.Scope == SCOPE_CAPI { + alerts[idx].Decisions = append(alerts[idx].Decisions, decision) + found = true + break + } + } else if *decision.Origin == SCOPE_LISTS { + if *alert.Source.Scope == SCOPE_LISTS && *alert.Scenario == *decision.Scenario { + alerts[idx].Decisions = append(alerts[idx].Decisions, decision) + found = true + break + } + } else { + log.Warningf("unknown origin %s", *decision.Origin) + } + } + if !found { + log.Warningf("Orphaned decision for %s - %s", *decision.Origin, *decision.Scenario) + } + } + return alerts +} + +// we receive only one list of decisions, that we need to break-up : +// one alert for "community blocklist" +// one alert per list we're subscribed to +func (a *apic) PullTop() error { + var err error + + if lastPullIsOld, err := a.CAPIPullIsOld(); err != nil { + return err + } else if !lastPullIsOld { + return nil + } + + log.Infof("Starting community-blocklist update") + + data, _, err := a.apiClient.Decisions.GetStream(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) + if err != nil { + return errors.Wrap(err, "get stream") + } + a.startup = false + /*to count additions/deletions across lists*/ + + log.Debugf("Received %d new decisions", len(data.New)) + log.Debugf("Received %d deleted decisions", len(data.Deleted)) + + add_counters, delete_counters := makeAddAndDeleteCounters() + // process deleted decisions + if nbDeleted, err := a.HandleDeletedDecisions(data.Deleted, delete_counters); err != nil { + return err + } else { + log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted) + } + + if len(data.New) == 0 { + log.Infof("capi/community-blocklist : received 0 new entries (expected if you just installed crowdsec)") + return nil + } + + // we receive only one 
list of decisions, that we need to break-up : + // one alert for "community blocklist" + // one alert per list we're subscribed to + alertsFromCapi := createAlertsForDecisions(data.New) + alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, data.New, add_counters) + + for idx, alert := range alertsFromCapi { + alertsFromCapi[idx] = setAlertScenario(add_counters, delete_counters, alert) + log.Debugf("%s has %d decisions", *alertsFromCapi[idx].Source.Scope, len(alertsFromCapi[idx].Decisions)) + if a.dbClient.Type == "sqlite" && (a.dbClient.WalMode == nil || !*a.dbClient.WalMode) { + log.Warningf("sqlite is not using WAL mode, LAPI might become unresponsive when inserting the community blocklist") + } + alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(alertsFromCapi[idx]) + if err != nil { + return errors.Wrapf(err, "while saving alert from %s", *alertsFromCapi[idx].Source.Scope) + } + log.Printf("%s : added %d entries, deleted %d entries (alert:%d)", *alertsFromCapi[idx].Source.Scope, inserted, deleted, alertID) + } + return nil +} + +func setAlertScenario(add_counters map[string]map[string]int, delete_counters map[string]map[string]int, alert *models.Alert) *models.Alert { + if *alert.Source.Scope == SCOPE_CAPI { + *alert.Source.Scope = SCOPE_CAPI_ALIAS + alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[SCOPE_CAPI]["all"], delete_counters[SCOPE_CAPI]["all"])) + } else if *alert.Source.Scope == SCOPE_LISTS { + *alert.Source.Scope = fmt.Sprintf("%s:%s", SCOPE_LISTS, *alert.Scenario) + alert.Scenario = types.StrPtr(fmt.Sprintf("update : +%d/-%d IPs", add_counters[SCOPE_LISTS][*alert.Scenario], delete_counters[SCOPE_LISTS][*alert.Scenario])) + } + return alert +} + +func (a *apic) Pull() error { + defer types.CatchPanic("lapi/pullFromAPIC") + + toldOnce := false + for { + scenario, err := a.FetchScenariosListFromDB() + if err != nil { + log.Errorf("unable to fetch scenarios from db: %s", err) + } + if len(scenario) > 0 { + break + } + if !toldOnce { + log.Warning("scenario list is empty, will not pull yet") + toldOnce = true + } + time.Sleep(1 * time.Second) + } + if err := a.PullTop(); err != nil { + log.Errorf("capi pull top: %s", err) + } + + log.Infof("Start pull from CrowdSec Central API (interval: %s once, then %s)", a.pullIntervalFirst.Round(time.Second), a.pullInterval) + ticker := time.NewTicker(a.pullIntervalFirst) + + for { + select { + case <-ticker.C: + ticker.Reset(a.pullInterval) + if err := a.PullTop(); err != nil { + log.Errorf("capi pull top: %s", err) + continue + } + case <-a.pullTomb.Dying(): // if one apic routine is dying, do we kill the others? 
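+ // yes: each of the three apic routines (push/pull/metrics) kills its
+ // two sibling tombs when it dies, so they all wind down together;
+ // Shutdown() does the same from the outside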
+ a.metricsTomb.Kill(nil) + a.pushTomb.Kill(nil) + return nil + } + } +} + +func (a *apic) GetMetrics() (*models.Metrics, error) { + metric := &models.Metrics{ + ApilVersion: types.StrPtr(cwversion.VersionStr()), + Machines: make([]*models.MetricsAgentInfo, 0), + Bouncers: make([]*models.MetricsBouncerInfo, 0), + } + machines, err := a.dbClient.ListMachines() + if err != nil { + return metric, err + } + bouncers, err := a.dbClient.ListBouncers() + if err != nil { + return metric, err + } + var lastpush string + for _, machine := range machines { + if machine.LastPush == nil { + lastpush = time.Time{}.String() + } else { + lastpush = machine.LastPush.String() + } + m := &models.MetricsAgentInfo{ + Version: machine.Version, + Name: machine.MachineId, + LastUpdate: machine.UpdatedAt.String(), + LastPush: lastpush, + } + metric.Machines = append(metric.Machines, m) + } + + for _, bouncer := range bouncers { + m := &models.MetricsBouncerInfo{ + Version: bouncer.Version, + CustomName: bouncer.Name, + Name: bouncer.Type, + LastPull: bouncer.LastPull.String(), + } + metric.Bouncers = append(metric.Bouncers, m) + } + return metric, nil +} + +func (a *apic) SendMetrics(stop chan (bool)) { + defer types.CatchPanic("lapi/metricsToAPIC") + + ticker := time.NewTicker(a.metricsIntervalFirst) + + log.Infof("Start send metrics to CrowdSec Central API (interval: %s once, then %s)", a.metricsIntervalFirst.Round(time.Second), a.metricsInterval) + + for { + metrics, err := a.GetMetrics() + if err != nil { + log.Errorf("unable to get metrics (%s), will retry", err) + } + _, _, err = a.apiClient.Metrics.Add(context.Background(), metrics) + if err != nil { + log.Errorf("capi metrics: failed: %s", err) + } else { + log.Infof("capi metrics: metrics sent successfully") + } + + select { + case <-stop: + return + case <-ticker.C: + ticker.Reset(a.metricsInterval) + case <-a.metricsTomb.Dying(): // if one apic routine is dying, do we kill the others? 
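+			// mirror of Pull(): the metrics routine also takes its siblings
+			// down with it, so any one tomb dying stops all three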
+ a.pullTomb.Kill(nil) + a.pushTomb.Kill(nil) + return + } + } +} + +func (a *apic) Shutdown() { + a.pushTomb.Kill(nil) + a.pullTomb.Kill(nil) + a.metricsTomb.Kill(nil) +} + +func makeAddAndDeleteCounters() (map[string]map[string]int, map[string]map[string]int) { + add_counters := make(map[string]map[string]int) + add_counters[SCOPE_CAPI] = make(map[string]int) + add_counters[SCOPE_LISTS] = make(map[string]int) + + delete_counters := make(map[string]map[string]int) + delete_counters[SCOPE_CAPI] = make(map[string]int) + delete_counters[SCOPE_LISTS] = make(map[string]int) + + return add_counters, delete_counters +} + +func updateCounterForDecision(counter map[string]map[string]int, decision *models.Decision, totalDecisions int) { + if *decision.Origin == SCOPE_CAPI { + counter[*decision.Origin]["all"] += totalDecisions + } else if *decision.Origin == SCOPE_LISTS { + counter[*decision.Origin][*decision.Scenario] += totalDecisions + } else { + log.Warningf("Unknown origin %s", *decision.Origin) + } +} diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go new file mode 100644 index 0000000..a06a750 --- /dev/null +++ b/pkg/apiserver/apic_test.go @@ -0,0 +1,944 @@ +package apiserver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/jarcoal/httpmock" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func getDBClient(t *testing.T) *database.Client { + t.Helper() + dbPath, err := os.CreateTemp("", "*sqlite") + require.NoError(t, err) + dbClient, err := database.NewClient(&csconfig.DatabaseCfg{ + Type: "sqlite", + DbName: "crowdsec", + DbPath: dbPath.Name(), + }) + require.NoError(t, err) + return dbClient +} + +func getAPIC(t *testing.T) *apic { + t.Helper() + dbClient := getDBClient(t) + return &apic{ + alertToPush: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + consoleConfig: &csconfig.ConsoleConfig{ + ShareManualDecisions: types.BoolPtr(false), + ShareTaintedScenarios: types.BoolPtr(false), + ShareCustomScenarios: types.BoolPtr(false), + }, + } +} + +func absDiff(a int, b int) (c int) { + if c = a - b; c < 0 { + return -1 * c + } + return c +} + +func assertTotalDecisionCount(t *testing.T, dbClient *database.Client, count int) { + d := dbClient.Ent.Decision.Query().AllX(context.Background()) + assert.Len(t, d, count) +} + +func assertTotalValidDecisionCount(t *testing.T, dbClient *database.Client, count int) { + d := dbClient.Ent.Decision.Query().Where( + decision.UntilGT(time.Now()), + ).AllX(context.Background()) + assert.Len(t, d, count) +} + +func jsonMarshalX(v interface{}) []byte { + data, err := json.Marshal(v) + if err != nil { + panic(err) + } + return data +} + +func assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { + d := 
dbClient.Ent.Alert.Query().AllX(context.Background()) + assert.Len(t, d, count) +} + +func TestAPICCAPIPullIsOld(t *testing.T) { + api := getAPIC(t) + + isOld, err := api.CAPIPullIsOld() + require.NoError(t, err) + assert.True(t, isOld) + + decision := api.dbClient.Ent.Decision.Create(). + SetUntil(time.Now().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetType("IP"). + SetScope("Country"). + SetValue("Blah"). + SetOrigin(SCOPE_CAPI). + SaveX(context.Background()) + + api.dbClient.Ent.Alert.Create(). + SetCreatedAt(time.Now()). + SetScenario("crowdsec/test"). + AddDecisions( + decision, + ). + SaveX(context.Background()) + + isOld, err = api.CAPIPullIsOld() + require.NoError(t, err) + + assert.False(t, isOld) +} + +func TestAPICFetchScenariosListFromDB(t *testing.T) { + tests := []struct { + name string + machineIDsWithScenarios map[string]string + expectedScenarios []string + }{ + { + name: "Simple one machine with two scenarios", + machineIDsWithScenarios: map[string]string{ + "a": "crowdsecurity/http-bf,crowdsecurity/ssh-bf", + }, + expectedScenarios: []string{"crowdsecurity/ssh-bf", "crowdsecurity/http-bf"}, + }, + { + name: "Multi machine with custom+hub scenarios", + machineIDsWithScenarios: map[string]string{ + "a": "crowdsecurity/http-bf,crowdsecurity/ssh-bf,my_scenario", + "b": "crowdsecurity/http-bf,crowdsecurity/ssh-bf,foo_scenario", + }, + expectedScenarios: []string{"crowdsecurity/ssh-bf", "crowdsecurity/http-bf", "my_scenario", "foo_scenario"}, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + api := getAPIC(t) + for machineID, scenarios := range tc.machineIDsWithScenarios { + api.dbClient.Ent.Machine.Create(). + SetMachineId(machineID). + SetPassword(testPassword.String()). + SetIpAddress("1.2.3.4"). + SetScenarios(scenarios). + ExecX(context.Background()) + } + + scenarios, err := api.FetchScenariosListFromDB() + for machineID := range tc.machineIDsWithScenarios { + api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) + } + require.NoError(t, err) + + assert.ElementsMatch(t, tc.expectedScenarios, scenarios) + }) + + } +} + +func TestNewAPIC(t *testing.T) { + var testConfig *csconfig.OnlineApiClientCfg + setConfig := func() { + testConfig = &csconfig.OnlineApiClientCfg{ + Credentials: &csconfig.ApiCredentialsCfg{ + URL: "foobar", + Login: "foo", + Password: "bar", + }, + } + } + type args struct { + dbClient *database.Client + consoleConfig *csconfig.ConsoleConfig + } + tests := []struct { + name string + args args + expectedErr string + action func() + }{ + { + name: "simple", + action: func() {}, + args: args{ + dbClient: getDBClient(t), + consoleConfig: LoadTestConfig().API.Server.ConsoleConfig, + }, + }, + { + name: "error in parsing URL", + action: func() { testConfig.Credentials.URL = "foobar http://" }, + args: args{ + dbClient: getDBClient(t), + consoleConfig: LoadTestConfig().API.Server.ConsoleConfig, + }, + expectedErr: "first path segment in URL cannot contain colon", + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + setConfig() + tc.action() + _, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig) + cstest.RequireErrorContains(t, err, tc.expectedErr) + }) + } +} + +func TestAPICHandleDeletedDecisions(t *testing.T) { + api := getAPIC(t) + _, deleteCounters := makeAddAndDeleteCounters() + + decision1 := api.dbClient.Ent.Decision.Create(). + SetUntil(time.Now().Add(time.Hour)). + SetScenario("crowdsec/test"). 
+ SetType("ban"). + SetScope("IP"). + SetValue("1.2.3.4"). + SetOrigin(SCOPE_CAPI). + SaveX(context.Background()) + + api.dbClient.Ent.Decision.Create(). + SetUntil(time.Now().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetType("ban"). + SetScope("IP"). + SetValue("1.2.3.4"). + SetOrigin(SCOPE_CAPI). + SaveX(context.Background()) + + assertTotalDecisionCount(t, api.dbClient, 2) + + nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{ + Value: types.StrPtr("1.2.3.4"), + Origin: &SCOPE_CAPI, + Type: &decision1.Type, + Scenario: types.StrPtr("crowdsec/test"), + Scope: types.StrPtr("IP"), + }}, deleteCounters) + + assert.NoError(t, err) + assert.Equal(t, 2, nbDeleted) + assert.Equal(t, 2, deleteCounters[SCOPE_CAPI]["all"]) +} + +func TestAPICGetMetrics(t *testing.T) { + cleanUp := func(api *apic) { + api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + } + tests := []struct { + name string + machineIDs []string + bouncers []string + expectedMetric *models.Metrics + }{ + { + name: "simple", + machineIDs: []string{"a", "b", "c"}, + bouncers: []string{"1", "2", "3"}, + expectedMetric: &models.Metrics{ + ApilVersion: types.StrPtr(cwversion.VersionStr()), + Bouncers: []*models.MetricsBouncerInfo{ + { + CustomName: "1", + LastPull: time.Time{}.String(), + }, { + CustomName: "2", + LastPull: time.Time{}.String(), + }, { + CustomName: "3", + LastPull: time.Time{}.String(), + }, + }, + Machines: []*models.MetricsAgentInfo{ + { + Name: "a", + LastPush: time.Time{}.String(), + LastUpdate: time.Time{}.String(), + }, + { + Name: "b", + LastPush: time.Time{}.String(), + LastUpdate: time.Time{}.String(), + }, + { + Name: "c", + LastPush: time.Time{}.String(), + LastUpdate: time.Time{}.String(), + }, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + apiClient := getAPIC(t) + cleanUp(apiClient) + for i, machineID := range tc.machineIDs { + apiClient.dbClient.Ent.Machine.Create(). + SetMachineId(machineID). + SetPassword(testPassword.String()). + SetIpAddress(fmt.Sprintf("1.2.3.%d", i)). + SetScenarios("crowdsecurity/test"). + SetLastPush(time.Time{}). + SetUpdatedAt(time.Time{}). + ExecX(context.Background()) + } + + for i, bouncerName := range tc.bouncers { + apiClient.dbClient.Ent.Bouncer.Create(). + SetIPAddress(fmt.Sprintf("1.2.3.%d", i)). + SetName(bouncerName). + SetAPIKey("foobar"). + SetRevoked(false). + SetLastPull(time.Time{}). 
+ ExecX(context.Background()) + } + + foundMetrics, err := apiClient.GetMetrics() + require.NoError(t, err) + + assert.Equal(t, tc.expectedMetric.Bouncers, foundMetrics.Bouncers) + assert.Equal(t, tc.expectedMetric.Machines, foundMetrics.Machines) + + }) + } +} + +func TestCreateAlertsForDecision(t *testing.T) { + httpBfDecisionList := &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/http-bf"), + } + + sshBfDecisionList := &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + } + + httpBfDecisionCommunity := &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/http-bf"), + } + + sshBfDecisionCommunity := &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + } + type args struct { + decisions []*models.Decision + } + tests := []struct { + name string + args args + want []*models.Alert + }{ + { + name: "2 decisions CAPI List Decisions should create 2 alerts", + args: args{ + decisions: []*models.Decision{ + httpBfDecisionList, + sshBfDecisionList, + }, + }, + want: []*models.Alert{ + createAlertForDecision(httpBfDecisionList), + createAlertForDecision(sshBfDecisionList), + }, + }, + { + name: "2 decisions CAPI List same scenario decisions should create 1 alert", + args: args{ + decisions: []*models.Decision{ + httpBfDecisionList, + httpBfDecisionList, + }, + }, + want: []*models.Alert{ + createAlertForDecision(httpBfDecisionList), + }, + }, + { + name: "5 decisions from community list should create 1 alert", + args: args{ + decisions: []*models.Decision{ + httpBfDecisionCommunity, + httpBfDecisionCommunity, + sshBfDecisionCommunity, + sshBfDecisionCommunity, + sshBfDecisionCommunity, + }, + }, + want: []*models.Alert{ + createAlertForDecision(sshBfDecisionCommunity), + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + if got := createAlertsForDecisions(tc.args.decisions); !reflect.DeepEqual(got, tc.want) { + t.Errorf("createAlertsForDecisions() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestFillAlertsWithDecisions(t *testing.T) { + httpBfDecisionCommunity := &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/http-bf"), + Scope: types.StrPtr("ip"), + } + + sshBfDecisionCommunity := &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + Scope: types.StrPtr("ip"), + } + + httpBfDecisionList := &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/http-bf"), + Scope: types.StrPtr("ip"), + } + + sshBfDecisionList := &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + Scope: types.StrPtr("ip"), + } + type args struct { + alerts []*models.Alert + decisions []*models.Decision + } + tests := []struct { + name string + args args + want []*models.Alert + }{ + { + name: "1 CAPI alert should pair up with n CAPI decisions", + args: args{ + alerts: []*models.Alert{createAlertForDecision(httpBfDecisionCommunity)}, + decisions: []*models.Decision{httpBfDecisionCommunity, sshBfDecisionCommunity, sshBfDecisionCommunity, httpBfDecisionCommunity}, + }, + want: []*models.Alert{ + func() *models.Alert { + a := createAlertForDecision(httpBfDecisionCommunity) + a.Decisions = []*models.Decision{httpBfDecisionCommunity, sshBfDecisionCommunity, sshBfDecisionCommunity, httpBfDecisionCommunity} + return a + }(), + }, + }, + { + name: "List alert should pair up only with decisions having same 
scenario", + args: args{ + alerts: []*models.Alert{createAlertForDecision(httpBfDecisionList), createAlertForDecision(sshBfDecisionList)}, + decisions: []*models.Decision{httpBfDecisionList, httpBfDecisionList, sshBfDecisionList, sshBfDecisionList}, + }, + want: []*models.Alert{ + func() *models.Alert { + a := createAlertForDecision(httpBfDecisionList) + a.Decisions = []*models.Decision{httpBfDecisionList, httpBfDecisionList} + return a + }(), + func() *models.Alert { + a := createAlertForDecision(sshBfDecisionList) + a.Decisions = []*models.Decision{sshBfDecisionList, sshBfDecisionList} + return a + }(), + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + addCounters, _ := makeAddAndDeleteCounters() + if got := fillAlertsWithDecisions(tc.args.alerts, tc.args.decisions, addCounters); !reflect.DeepEqual(got, tc.want) { + t.Errorf("fillAlertsWithDecisions() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestAPICPullTop(t *testing.T) { + api := getAPIC(t) + api.dbClient.Ent.Decision.Create(). + SetOrigin(SCOPE_LISTS). + SetType("ban"). + SetValue("9.9.9.9"). + SetScope("Ip"). + SetScenario("crowdsecurity/ssh-bf"). + SetUntil(time.Now().Add(time.Hour)). + ExecX(context.Background()) + assertTotalDecisionCount(t, api.dbClient, 1) + assertTotalValidDecisionCount(t, api.dbClient, 1) + httpmock.Activate() + defer httpmock.DeactivateAndReset() + httpmock.RegisterResponder("GET", "http://api.crowdsec.net/api/decisions/stream", httpmock.NewBytesResponder( + 200, jsonMarshalX( + models.DecisionsStreamResponse{ + Deleted: models.GetDecisionsResponse{ + &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + Value: types.StrPtr("9.9.9.9"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, // This is already present in DB + &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + Value: types.StrPtr("9.1.9.9"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, // This not present in DB. + }, + New: models.GetDecisionsResponse{ + &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/test1"), + Value: types.StrPtr("1.2.3.4"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, + &models.Decision{ + Origin: &SCOPE_CAPI, + Scenario: types.StrPtr("crowdsecurity/test2"), + Value: types.StrPtr("1.2.3.5"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, // These two are from community list. + &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/http-bf"), + Value: types.StrPtr("1.2.3.6"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, + &models.Decision{ + Origin: &SCOPE_LISTS, + Scenario: types.StrPtr("crowdsecurity/ssh-bf"), + Value: types.StrPtr("1.2.3.7"), + Scope: types.StrPtr("Ip"), + Duration: types.StrPtr("24h"), + Type: types.StrPtr("ban"), + }, // These two are from list subscription. 
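+					// expected grouping once PullTop processes this payload
+					// (asserted further down): one community-blocklist alert
+					// holding 1.2.3.4 and 1.2.3.5, plus one alert per
+					// subscribed list (crowdsecurity/http-bf and
+					// crowdsecurity/ssh-bf)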
+ }, + }, + ), + )) + url, err := url.ParseRequestURI("http://api.crowdsec.net/") + require.NoError(t, err) + + apic, err := apiclient.NewDefaultClient( + url, + "/api", + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil, + ) + require.NoError(t, err) + + api.apiClient = apic + err = api.PullTop() + require.NoError(t, err) + + assertTotalDecisionCount(t, api.dbClient, 5) + assertTotalValidDecisionCount(t, api.dbClient, 4) + assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. + alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) + validDecisions := api.dbClient.Ent.Decision.Query().Where( + decision.UntilGT(time.Now())). + AllX(context.Background()) + + decisionScenarioFreq := make(map[string]int) + alertScenario := make(map[string]int) + + for _, alert := range alerts { + alertScenario[alert.SourceScope]++ + } + assert.Equal(t, 3, len(alertScenario)) + assert.Equal(t, 1, alertScenario[SCOPE_CAPI_ALIAS]) + assert.Equal(t, 1, alertScenario["lists:crowdsecurity/ssh-bf"]) + assert.Equal(t, 1, alertScenario["lists:crowdsecurity/http-bf"]) + + for _, decisions := range validDecisions { + decisionScenarioFreq[decisions.Scenario]++ + } + + assert.Equal(t, 1, decisionScenarioFreq["crowdsecurity/http-bf"], 1) + assert.Equal(t, 1, decisionScenarioFreq["crowdsecurity/ssh-bf"], 1) + assert.Equal(t, 1, decisionScenarioFreq["crowdsecurity/test1"], 1) + assert.Equal(t, 1, decisionScenarioFreq["crowdsecurity/test2"], 1) +} + +func TestAPICPush(t *testing.T) { + tests := []struct { + name string + alerts []*models.Alert + expectedCalls int + }{ + { + name: "simple single alert", + alerts: []*models.Alert{ + { + Scenario: types.StrPtr("crowdsec/test"), + ScenarioHash: types.StrPtr("certified"), + ScenarioVersion: types.StrPtr("v1.0"), + Simulated: types.BoolPtr(false), + }, + }, + expectedCalls: 1, + }, + { + name: "simulated alert is not pushed", + alerts: []*models.Alert{ + { + Scenario: types.StrPtr("crowdsec/test"), + ScenarioHash: types.StrPtr("certified"), + ScenarioVersion: types.StrPtr("v1.0"), + Simulated: types.BoolPtr(true), + }, + }, + expectedCalls: 0, + }, + { + name: "1 request per 50 alerts", + expectedCalls: 2, + alerts: func() []*models.Alert { + alerts := make([]*models.Alert, 100) + for i := 0; i < 100; i++ { + alerts[i] = &models.Alert{ + Scenario: types.StrPtr("crowdsec/test"), + ScenarioHash: types.StrPtr("certified"), + ScenarioVersion: types.StrPtr("v1.0"), + Simulated: types.BoolPtr(false), + } + } + return alerts + }(), + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + api := getAPIC(t) + api.pushInterval = time.Millisecond + api.pushIntervalFirst = time.Millisecond + url, err := url.ParseRequestURI("http://api.crowdsec.net/") + require.NoError(t, err) + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + apic, err := apiclient.NewDefaultClient( + url, + "/api", + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil, + ) + require.NoError(t, err) + + api.apiClient = apic + httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", httpmock.NewBytesResponder(200, []byte{})) + go func() { + api.alertToPush <- tc.alerts + time.Sleep(time.Second) + api.Shutdown() + }() + err = api.Push() + require.NoError(t, err) + assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) + }) + } +} + +func TestAPICSendMetrics(t *testing.T) { + tests := []struct { + name string + duration time.Duration + expectedCalls int + setUp func(*apic) + metricsInterval 
time.Duration + }{ + { + name: "basic", + duration: time.Millisecond * 30, + metricsInterval: time.Millisecond * 5, + expectedCalls: 5, + setUp: func(api *apic) {}, + }, + { + name: "with some metrics", + duration: time.Millisecond * 30, + metricsInterval: time.Millisecond * 5, + expectedCalls: 5, + setUp: func(api *apic) { + api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Machine.Create(). + SetMachineId("1234"). + SetPassword(testPassword.String()). + SetIpAddress("1.2.3.4"). + SetScenarios("crowdsecurity/test"). + SetLastPush(time.Time{}). + SetUpdatedAt(time.Time{}). + ExecX(context.Background()) + + api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Create(). + SetIPAddress("1.2.3.6"). + SetName("someBouncer"). + SetAPIKey("foobar"). + SetRevoked(false). + SetLastPull(time.Time{}). + ExecX(context.Background()) + }, + }, + } + + httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/metrics/", httpmock.NewBytesResponder(200, []byte{})) + httpmock.Activate() + defer httpmock.Deactivate() + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + url, err := url.ParseRequestURI("http://api.crowdsec.net/") + require.NoError(t, err) + + apiClient, err := apiclient.NewDefaultClient( + url, + "/api", + fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), + nil, + ) + require.NoError(t, err) + + api := getAPIC(t) + api.pushInterval = time.Millisecond + api.pushIntervalFirst = time.Millisecond + api.apiClient = apiClient + api.metricsInterval = tc.metricsInterval + api.metricsIntervalFirst = tc.metricsInterval + tc.setUp(api) + + stop := make(chan bool) + httpmock.ZeroCallCounters() + go api.SendMetrics(stop) + time.Sleep(tc.duration) + stop <- true + + info := httpmock.GetCallCountInfo() + noResponderCalls := info["NO_RESPONDER"] + responderCalls := info["POST http://api.crowdsec.net/api/metrics/"] + assert.LessOrEqual(t, absDiff(tc.expectedCalls, responderCalls), 2) + assert.Zero(t, noResponderCalls) + }) + } +} + +func TestAPICPull(t *testing.T) { + api := getAPIC(t) + tests := []struct { + name string + setUp func() + expectedDecisionCount int + logContains string + }{ + { + name: "test pull if no scenarios are present", + setUp: func() {}, + logContains: "scenario list is empty, will not pull yet", + }, + { + name: "test pull", + setUp: func() { + api.dbClient.Ent.Machine.Create(). + SetMachineId("1.2.3.4"). + SetPassword(testPassword.String()). + SetIpAddress("1.2.3.4"). + SetScenarios("crowdsecurity/ssh-bf"). 
+				ExecX(context.Background())
+			},
+			expectedDecisionCount: 1,
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			api = getAPIC(t)
+			api.pullInterval = time.Millisecond
+			api.pullIntervalFirst = time.Millisecond
+			url, err := url.ParseRequestURI("http://api.crowdsec.net/")
+			require.NoError(t, err)
+			httpmock.Activate()
+			defer httpmock.DeactivateAndReset()
+			apic, err := apiclient.NewDefaultClient(
+				url,
+				"/api",
+				fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()),
+				nil,
+			)
+			require.NoError(t, err)
+			api.apiClient = apic
+			httpmock.RegisterNoResponder(httpmock.NewBytesResponder(200, jsonMarshalX(
+				models.DecisionsStreamResponse{
+					New: models.GetDecisionsResponse{
+						&models.Decision{
+							Origin:   &SCOPE_CAPI,
+							Scenario: types.StrPtr("crowdsecurity/test2"),
+							Value:    types.StrPtr("1.2.3.5"),
+							Scope:    types.StrPtr("Ip"),
+							Duration: types.StrPtr("24h"),
+							Type:     types.StrPtr("ban"),
+						},
+					},
+				},
+			)))
+			tc.setUp()
+			var buf bytes.Buffer
+			go func() {
+				logrus.SetOutput(&buf)
+				if err := api.Pull(); err != nil {
+					panic(err)
+				}
+			}()
+			// Slightly long because the Windows CI runners are slow, and this can lead to random failures
+			time.Sleep(time.Millisecond * 500)
+			logrus.SetOutput(os.Stderr)
+			assert.Contains(t, buf.String(), tc.logContains)
+			assertTotalDecisionCount(t, api.dbClient, tc.expectedDecisionCount)
+		})
+	}
+}
+
+func TestShouldShareAlert(t *testing.T) {
+	tests := []struct {
+		name          string
+		consoleConfig *csconfig.ConsoleConfig
+		alert         *models.Alert
+		expectedRet   bool
+		expectedTrust string
+	}{
+		{
+			name: "custom alert should be shared if config enables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareCustomScenarios: types.BoolPtr(true),
+			},
+			alert:         &models.Alert{Simulated: types.BoolPtr(false)},
+			expectedRet:   true,
+			expectedTrust: "custom",
+		},
+		{
+			name: "custom alert should not be shared if config disables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareCustomScenarios: types.BoolPtr(false),
+			},
+			alert:         &models.Alert{Simulated: types.BoolPtr(false)},
+			expectedRet:   false,
+			expectedTrust: "custom",
+		},
+		{
+			name: "manual alert should be shared if config enables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareManualDecisions: types.BoolPtr(true),
+			},
+			alert: &models.Alert{
+				Simulated: types.BoolPtr(false),
+				Decisions: []*models.Decision{{Origin: types.StrPtr("cscli")}},
+			},
+			expectedRet:   true,
+			expectedTrust: "manual",
+		},
+		{
+			name: "manual alert should not be shared if config disables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareManualDecisions: types.BoolPtr(false),
+			},
+			alert: &models.Alert{
+				Simulated: types.BoolPtr(false),
+				Decisions: []*models.Decision{{Origin: types.StrPtr("cscli")}},
+			},
+			expectedRet:   false,
+			expectedTrust: "manual",
+		},
+		{
+			name: "tainted alert should be shared if config enables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareTaintedScenarios: types.BoolPtr(true),
+			},
+			alert: &models.Alert{
+				Simulated:    types.BoolPtr(false),
+				ScenarioHash: types.StrPtr("whateverHash"),
+			},
+			expectedRet:   true,
+			expectedTrust: "tainted",
+		},
+		{
+			name: "tainted alert should not be shared if config disables it",
+			consoleConfig: &csconfig.ConsoleConfig{
+				ShareTaintedScenarios: types.BoolPtr(false),
+			},
+			alert: &models.Alert{
+				Simulated:    types.BoolPtr(false),
+				ScenarioHash: types.StrPtr("whateverHash"),
+			},
+			expectedRet:   false,
+			expectedTrust: "tainted",
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
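+			// only the boolean verdict is asserted here; expectedTrust is
+			// carried in the fixtures but not checked by this test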
ret := shouldShareAlert(tc.alert, tc.consoleConfig)
+			assert.Equal(t, tc.expectedRet, ret)
+		})
+	}
+}
diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go
new file mode 100644
index 0000000..d59454f
--- /dev/null
+++ b/pkg/apiserver/apiserver.go
@@ -0,0 +1,407 @@
+package apiserver
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers"
+	v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1"
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
+	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/gin-gonic/gin"
+	"github.com/go-co-op/gocron"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/natefinch/lumberjack.v2"
+	"gopkg.in/tomb.v2"
+)
+
+var (
+	keyLength = 32
+)
+
+type APIServer struct {
+	URL            string
+	TLS            *csconfig.TLSCfg
+	dbClient       *database.Client
+	logFile        string
+	controller     *controllers.Controller
+	flushScheduler *gocron.Scheduler
+	router         *gin.Engine
+	httpServer     *http.Server
+	apic           *apic
+	httpServerTomb tomb.Tomb
+	consoleConfig  *csconfig.ConsoleConfig
+}
+
+// CustomRecoveryWithWriter returns a middleware that recovers from any panics and writes a 500 if there was one.
+func CustomRecoveryWithWriter() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		defer func() {
+			if err := recover(); err != nil {
+				// Check for a broken connection, as it is not really a
+				// condition that warrants a panic stack trace.
+				var brokenPipe bool
+				if ne, ok := err.(*net.OpError); ok {
+					if se, ok := ne.Err.(*os.SyscallError); ok {
+						if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") {
+							brokenPipe = true
+						}
+					}
+				}
+
+				// because of https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go
+				// and because it seems gin doesn't handle those either, we need to hand-define some errors to catch them properly
+				if strErr, ok := err.(error); ok {
+					// stolen from http2/server.go in x/net
+					var (
+						errClientDisconnected = errors.New("client disconnected")
+						errClosedBody         = errors.New("body closed by handler")
+						errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
+						errStreamClosed       = errors.New("http2: stream closed")
+					)
+					if strErr == errClientDisconnected ||
+						strErr == errClosedBody ||
+						strErr == errHandlerComplete ||
+						strErr == errStreamClosed {
+						brokenPipe = true
+					}
+				}
+
+				if brokenPipe {
+					log.Warningf("client %s disconnected : %s", c.ClientIP(), err)
+					c.Abort()
+				} else {
+					filename := types.WriteStackTrace(err)
+					log.Warningf("client %s error : %s", c.ClientIP(), err)
+					log.Warningf("stacktrace written to %s, please attach it to your issue", filename)
+					c.AbortWithStatus(http.StatusInternalServerError)
+				}
+			}
+		}()
+		c.Next()
+	}
+}
+
+func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
+	var flushScheduler *gocron.Scheduler
+	dbClient, err := database.NewClient(config.DbConfig)
+	if err != nil {
+		return &APIServer{}, errors.Wrap(err, "unable to init database client")
+	}
+
+	if config.DbConfig.Flush != nil {
+		flushScheduler, err = dbClient.StartFlushScheduler(config.DbConfig.Flush)
+		if err != nil {
+			return &APIServer{}, err
+		}
+	}
+
+	logFile := ""
+	if config.LogMedia == "file" {
+		logFile =
fmt.Sprintf("%s/crowdsec_api.log", config.LogDir) + } + + if log.GetLevel() < log.DebugLevel { + gin.SetMode(gin.ReleaseMode) + } + log.Debugf("starting router, logging to %s", logFile) + router := gin.New() + + if config.TrustedProxies != nil && config.UseForwardedForHeaders { + if err := router.SetTrustedProxies(*config.TrustedProxies); err != nil { + return &APIServer{}, errors.Wrap(err, "while setting trusted_proxies") + } + router.ForwardedByClientIP = true + } else { + router.ForwardedByClientIP = false + } + + /*The logger that will be used by handlers*/ + clog := log.New() + + if err := types.ConfigureLogger(clog); err != nil { + return nil, errors.Wrap(err, "while configuring gin logger") + } + if config.LogLevel != nil { + clog.SetLevel(*config.LogLevel) + } + + /*Configure logs*/ + if logFile != "" { + _maxsize := 500 + if config.LogMaxSize != 0 { + _maxsize = config.LogMaxSize + } + _maxfiles := 3 + if config.LogMaxFiles != 0 { + _maxfiles = config.LogMaxFiles + } + _maxage := 28 + if config.LogMaxAge != 0 { + _maxage = config.LogMaxAge + } + _compress := true + if config.CompressLogs != nil { + _compress = *config.CompressLogs + } + /*cf. https://github.com/natefinch/lumberjack/issues/82 + let's create the file beforehand w/ the right perms */ + // check if file exists + _, err := os.Stat(logFile) + // create file if not exists, purposefully ignore errors + if os.IsNotExist(err) { + file, _ := os.OpenFile(logFile, os.O_RDWR|os.O_CREATE, 0600) + file.Close() + } + LogOutput := &lumberjack.Logger{ + Filename: logFile, + MaxSize: _maxsize, //megabytes + MaxBackups: _maxfiles, + MaxAge: _maxage, //days + Compress: _compress, //disabled by default + } + clog.SetOutput(LogOutput) + } + + gin.DefaultErrorWriter = clog.WriterLevel(log.ErrorLevel) + gin.DefaultWriter = clog.Writer() + + router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + param.ClientIP, + param.TimeStamp.Format(time.RFC1123), + param.Method, + param.Path, + param.Request.Proto, + param.StatusCode, + param.Latency, + param.Request.UserAgent(), + param.ErrorMessage, + ) + })) + + router.NoRoute(func(c *gin.Context) { + c.JSON(http.StatusNotFound, gin.H{"message": "Page or Method not found"}) + }) + router.Use(CustomRecoveryWithWriter()) + + controller := &controllers.Controller{ + DBClient: dbClient, + Ectx: context.Background(), + Router: router, + Profiles: config.Profiles, + Log: clog, + ConsoleConfig: config.ConsoleConfig, + } + + var apiClient *apic + + if config.OnlineClient != nil && config.OnlineClient.Credentials != nil { + log.Printf("Loading CAPI pusher") + apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig) + if err != nil { + return &APIServer{}, err + } + controller.CAPIChan = apiClient.alertToPush + } else { + apiClient = nil + controller.CAPIChan = nil + } + if trustedIPs, err := config.GetTrustedIPs(); err == nil { + controller.TrustedIPs = trustedIPs + } else { + return &APIServer{}, err + } + + return &APIServer{ + URL: config.ListenURI, + TLS: config.TLS, + logFile: logFile, + dbClient: dbClient, + controller: controller, + flushScheduler: flushScheduler, + router: router, + apic: apiClient, + httpServerTomb: tomb.Tomb{}, + consoleConfig: config.ConsoleConfig, + }, nil + +} + +func (s *APIServer) Router() (*gin.Engine, error) { + return s.router, nil +} + +func (s *APIServer) GetTLSConfig() (*tls.Config, error) { + var caCert []byte + var err error + var caCertPool *x509.CertPool + 
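+	// a sketch of the decision tree below: a nil TLS section yields an
+	// empty tls.Config; an empty ClientVerification falls back to
+	// tls.VerifyClientCertIfGiven; the CA bundle is only loaded when the
+	// chosen auth type actually requests client certificates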
var clientAuthType tls.ClientAuthType + + if s.TLS == nil { + return &tls.Config{}, nil + } + + if s.TLS.ClientVerification == "" { + //sounds like a sane default : verify client cert if given, but don't make it mandatory + clientAuthType = tls.VerifyClientCertIfGiven + } else { + clientAuthType, err = getTLSAuthType(s.TLS.ClientVerification) + if err != nil { + return nil, err + } + } + + if s.TLS.CACertPath != "" { + if clientAuthType > tls.RequestClientCert { + log.Infof("(tls) Client Auth Type set to %s", clientAuthType.String()) + caCert, err = os.ReadFile(s.TLS.CACertPath) + if err != nil { + return nil, errors.Wrap(err, "Error opening cert file") + } + caCertPool = x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + } + } + + return &tls.Config{ + ServerName: s.TLS.ServerName, //should it be removed ? + ClientAuth: clientAuthType, + ClientCAs: caCertPool, + MinVersion: tls.VersionTLS12, // TLS versions below 1.2 are considered insecure - see https://www.rfc-editor.org/rfc/rfc7525.txt for details + }, nil +} + +func (s *APIServer) Run(apiReady chan bool) error { + defer types.CatchPanic("lapi/runServer") + tlsCfg, err := s.GetTLSConfig() + if err != nil { + return errors.Wrap(err, "while creating TLS config") + } + s.httpServer = &http.Server{ + Addr: s.URL, + Handler: s.router, + TLSConfig: tlsCfg, + } + + if s.apic != nil { + s.apic.pushTomb.Go(func() error { + if err := s.apic.Push(); err != nil { + log.Errorf("capi push: %s", err) + return err + } + return nil + }) + s.apic.pullTomb.Go(func() error { + if err := s.apic.Pull(); err != nil { + log.Errorf("capi pull: %s", err) + return err + } + return nil + }) + s.apic.metricsTomb.Go(func() error { + s.apic.SendMetrics(make(chan bool)) + return nil + }) + } + + s.httpServerTomb.Go(func() error { + go func() { + apiReady <- true + log.Infof("CrowdSec Local API listening on %s", s.URL) + if s.TLS != nil && s.TLS.CertFilePath != "" && s.TLS.KeyFilePath != "" { + if err := s.httpServer.ListenAndServeTLS(s.TLS.CertFilePath, s.TLS.KeyFilePath); err != nil { + log.Fatal(err) + } + } else { + if err := s.httpServer.ListenAndServe(); err != http.ErrServerClosed { + log.Fatal(err) + } + } + }() + <-s.httpServerTomb.Dying() + return nil + }) + + return nil +} + +func (s *APIServer) Close() { + if s.apic != nil { + s.apic.Shutdown() // stop apic first since it use dbClient + } + s.dbClient.Ent.Close() + if s.flushScheduler != nil { + s.flushScheduler.Stop() + } +} + +func (s *APIServer) Shutdown() error { + s.Close() + if err := s.httpServer.Shutdown(context.TODO()); err != nil { + return err + } + + //close io.writer logger given to gin + if pipe, ok := gin.DefaultErrorWriter.(*io.PipeWriter); ok { + pipe.Close() + } + if pipe, ok := gin.DefaultWriter.(*io.PipeWriter); ok { + pipe.Close() + } + s.httpServerTomb.Kill(nil) + if err := s.httpServerTomb.Wait(); err != nil { + return errors.Wrap(err, "while waiting on httpServerTomb") + } + return nil +} + +func (s *APIServer) AttachPluginBroker(broker *csplugin.PluginBroker) { + s.controller.PluginChannel = broker.PluginChannel +} + +func (s *APIServer) InitController() error { + + err := s.controller.Init() + if err != nil { + return errors.Wrap(err, "controller init") + } + if s.TLS != nil { + var cacheExpiration time.Duration + if s.TLS.CacheExpiration != nil { + cacheExpiration = *s.TLS.CacheExpiration + } else { + cacheExpiration = time.Hour + } + s.controller.HandlerV1.Middlewares.JWT.TlsAuth, err = v1.NewTLSAuth(s.TLS.AllowedAgentsOU, s.TLS.CRLPath, + cacheExpiration, + 
log.WithFields(log.Fields{ + "component": "tls-auth", + "type": "agent", + })) + if err != nil { + return errors.Wrap(err, "while creating TLS auth for agents") + } + s.controller.HandlerV1.Middlewares.APIKey.TlsAuth, err = v1.NewTLSAuth(s.TLS.AllowedBouncersOU, s.TLS.CRLPath, + cacheExpiration, + log.WithFields(log.Fields{ + "component": "tls-auth", + "type": "bouncer", + })) + if err != nil { + return errors.Wrap(err, "while creating TLS auth for bouncers") + } + } + return err +} diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go new file mode 100644 index 0000000..04e1365 --- /dev/null +++ b/pkg/apiserver/apiserver_test.go @@ -0,0 +1,444 @@ +package apiserver + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + "time" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/gin-gonic/gin" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +var testMachineID = "test" +var testPassword = strfmt.Password("test") +var MachineTest = models.WatcherAuthRequest{ + MachineID: &testMachineID, + Password: &testPassword, +} + +var UserAgent = fmt.Sprintf("crowdsec-test/%s", cwversion.Version) +var emptyBody = strings.NewReader("") + +func LoadTestConfig() csconfig.Config { + config := csconfig.Config{} + maxAge := "1h" + flushConfig := csconfig.FlushDBCfg{ + MaxAge: &maxAge, + } + tempDir, _ := os.MkdirTemp("", "crowdsec_tests") + dbconfig := csconfig.DatabaseCfg{ + Type: "sqlite", + DbPath: filepath.Join(tempDir, "ent"), + Flush: &flushConfig, + } + apiServerConfig := csconfig.LocalApiServerCfg{ + ListenURI: "http://127.0.0.1:8080", + DbConfig: &dbconfig, + ProfilesPath: "./tests/profiles.yaml", + ConsoleConfig: &csconfig.ConsoleConfig{ + ShareManualDecisions: new(bool), + ShareTaintedScenarios: new(bool), + ShareCustomScenarios: new(bool), + }, + } + apiConfig := csconfig.APICfg{ + Server: &apiServerConfig, + } + config.API = &apiConfig + if err := config.API.Server.LoadProfiles(); err != nil { + log.Fatalf("failed to load profiles: %s", err) + } + return config +} + +func LoadTestConfigForwardedFor() csconfig.Config { + config := csconfig.Config{} + maxAge := "1h" + flushConfig := csconfig.FlushDBCfg{ + MaxAge: &maxAge, + } + tempDir, _ := os.MkdirTemp("", "crowdsec_tests") + dbconfig := csconfig.DatabaseCfg{ + Type: "sqlite", + DbPath: filepath.Join(tempDir, "ent"), + Flush: &flushConfig, + } + apiServerConfig := csconfig.LocalApiServerCfg{ + ListenURI: "http://127.0.0.1:8080", + DbConfig: &dbconfig, + ProfilesPath: "./tests/profiles.yaml", + UseForwardedForHeaders: true, + TrustedProxies: &[]string{"0.0.0.0/0"}, + ConsoleConfig: &csconfig.ConsoleConfig{ + ShareManualDecisions: new(bool), + ShareTaintedScenarios: new(bool), + ShareCustomScenarios: new(bool), + }, + } + apiConfig := csconfig.APICfg{ + Server: &apiServerConfig, + } + config.API = &apiConfig + if err := config.API.Server.LoadProfiles(); err != nil { + log.Fatalf("failed to load profiles: %s", err) + } + return config +} + +func NewAPIServer() (*APIServer, csconfig.Config, error) { + config := LoadTestConfig() + os.Remove("./ent") + apiServer, err := 
NewServer(config.API.Server) + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + log.Printf("Creating new API server") + gin.SetMode(gin.TestMode) + return apiServer, config, nil +} + +func NewAPITest() (*gin.Engine, csconfig.Config, error) { + apiServer, config, err := NewAPIServer() + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + err = apiServer.InitController() + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + router, err := apiServer.Router() + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + return router, config, nil +} + +func NewAPITestForwardedFor() (*gin.Engine, csconfig.Config, error) { + config := LoadTestConfigForwardedFor() + + os.Remove("./ent") + apiServer, err := NewServer(config.API.Server) + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + err = apiServer.InitController() + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + log.Printf("Creating new API server") + gin.SetMode(gin.TestMode) + router, err := apiServer.Router() + if err != nil { + return nil, config, fmt.Errorf("unable to run local API: %s", err) + } + return router, config, nil +} + +func ValidateMachine(machineID string, config *csconfig.DatabaseCfg) error { + dbClient, err := database.NewClient(config) + if err != nil { + return fmt.Errorf("unable to create new database client: %s", err) + } + if err := dbClient.ValidateMachine(machineID); err != nil { + return fmt.Errorf("unable to validate machine: %s", err) + } + return nil +} + +func GetMachineIP(machineID string, config *csconfig.DatabaseCfg) (string, error) { + dbClient, err := database.NewClient(config) + if err != nil { + return "", fmt.Errorf("unable to create new database client: %s", err) + } + machines, err := dbClient.ListMachines() + if err != nil { + return "", fmt.Errorf("Unable to list machines: %s", err) + } + for _, machine := range machines { + if machine.MachineId == machineID { + return machine.IpAddress, nil + } + } + return "", nil +} + +func GetAlertReaderFromFile(path string) *strings.Reader { + + alertContentBytes, err := os.ReadFile(path) + if err != nil { + log.Fatal(err) + } + + alerts := make([]*models.Alert, 0) + if err := json.Unmarshal(alertContentBytes, &alerts); err != nil { + log.Fatal(err) + } + + for _, alert := range alerts { + *alert.StartAt = time.Now().UTC().Format(time.RFC3339) + *alert.StopAt = time.Now().UTC().Format(time.RFC3339) + } + + alertContent, err := json.Marshal(alerts) + if err != nil { + log.Fatal(err) + } + return strings.NewReader(string(alertContent)) + +} + +func readDecisionsGetResp(resp *httptest.ResponseRecorder) ([]*models.Decision, int, error) { + var response []*models.Decision + if resp == nil { + return nil, 0, errors.New("response is nil") + } + err := json.Unmarshal(resp.Body.Bytes(), &response) + if err != nil { + return nil, resp.Code, err + } + return response, resp.Code, nil +} + +func readDecisionsErrorResp(resp *httptest.ResponseRecorder) (map[string]string, int, error) { + var response map[string]string + if resp == nil { + return nil, 0, errors.New("response is nil") + } + err := json.Unmarshal(resp.Body.Bytes(), &response) + if err != nil { + return nil, resp.Code, err + } + return response, resp.Code, nil +} + +func readDecisionsDeleteResp(resp *httptest.ResponseRecorder) (*models.DeleteDecisionResponse, int, error) { + var 
response models.DeleteDecisionResponse + if resp == nil { + return nil, 0, errors.New("response is nil") + } + err := json.Unmarshal(resp.Body.Bytes(), &response) + if err != nil { + return nil, resp.Code, err + } + return &response, resp.Code, nil +} + +func readDecisionsStreamResp(resp *httptest.ResponseRecorder) (map[string][]*models.Decision, int, error) { + response := make(map[string][]*models.Decision) + if resp == nil { + return nil, 0, errors.New("response is nil") + } + err := json.Unmarshal(resp.Body.Bytes(), &response) + if err != nil { + return nil, resp.Code, err + } + return response, resp.Code, nil +} + +func CreateTestMachine(router *gin.Engine) (string, error) { + b, err := json.Marshal(MachineTest) + if err != nil { + return "", fmt.Errorf("unable to marshal MachineTest") + } + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Set("User-Agent", UserAgent) + router.ServeHTTP(w, req) + return body, nil +} + +func CreateTestBouncer(config *csconfig.DatabaseCfg) (string, error) { + dbClient, err := database.NewClient(config) + if err != nil { + log.Fatalf("unable to create new database client: %s", err) + } + apiKey, err := middlewares.GenerateAPIKey(keyLength) + if err != nil { + return "", fmt.Errorf("unable to generate api key: %s", err) + } + _, err = dbClient.CreateBouncer("test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + if err != nil { + return "", fmt.Errorf("unable to create blocker: %s", err) + } + + return apiKey, nil +} + +func TestWithWrongDBConfig(t *testing.T) { + config := LoadTestConfig() + config.API.Server.DbConfig.Type = "test" + apiServer, err := NewServer(config.API.Server) + + assert.Equal(t, apiServer, &APIServer{}) + assert.Equal(t, "unable to init database client: unknown database type 'test'", err.Error()) +} + +func TestWithWrongFlushConfig(t *testing.T) { + config := LoadTestConfig() + maxItems := -1 + config.API.Server.DbConfig.Flush.MaxItems = &maxItems + apiServer, err := NewServer(config.API.Server) + + assert.Equal(t, apiServer, &APIServer{}) + assert.Equal(t, "max_items can't be zero or negative number", err.Error()) +} + +func TestUnknownPath(t *testing.T) { + router, _, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 404, w.Code) + +} + +/* + +ListenURI string `yaml:"listen_uri,omitempty"` //127.0.0.1:8080 + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + +*/ + +func TestLoggingDebugToFileConfig(t *testing.T) { + + /*declare settings*/ + maxAge := "1h" + flushConfig := csconfig.FlushDBCfg{ + MaxAge: &maxAge, + } + tempDir, _ := os.MkdirTemp("", "crowdsec_tests") + dbconfig := csconfig.DatabaseCfg{ + Type: "sqlite", + DbPath: filepath.Join(tempDir, "ent"), + Flush: &flushConfig, + } + cfg := csconfig.LocalApiServerCfg{ + ListenURI: "127.0.0.1:8080", + LogMedia: "file", + LogDir: tempDir, + DbConfig: &dbconfig, + } + lvl := log.DebugLevel + expectedFile := 
fmt.Sprintf("%s/crowdsec_api.log", tempDir)
+	expectedLines := []string{"/test42"}
+	cfg.LogLevel = &lvl
+
+	// Configure logging
+	if err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false); err != nil {
+		t.Fatal(err.Error())
+	}
+	api, err := NewServer(&cfg)
+	if err != nil {
+		t.Fatalf("failed to create api: %s", err)
+	}
+	if api == nil {
+		t.Fatalf("failed to create api: api is nil")
+	}
+
+	w := httptest.NewRecorder()
+	req, _ := http.NewRequest(http.MethodGet, "/test42", nil)
+	req.Header.Set("User-Agent", UserAgent)
+	api.router.ServeHTTP(w, req)
+	assert.Equal(t, 404, w.Code)
+	// wait for the request to happen
+	time.Sleep(500 * time.Millisecond)
+
+	// check file content
+	data, err := os.ReadFile(expectedFile)
+	if err != nil {
+		t.Fatalf("failed to read file: %s", err)
+	}
+
+	for _, expectedStr := range expectedLines {
+		if !strings.Contains(string(data), expectedStr) {
+			t.Fatalf("expected %s in %s", expectedStr, string(data))
+		}
+	}
+}
+
+func TestLoggingErrorToFileConfig(t *testing.T) {
+
+	/* declare settings */
+	maxAge := "1h"
+	flushConfig := csconfig.FlushDBCfg{
+		MaxAge: &maxAge,
+	}
+	tempDir, _ := os.MkdirTemp("", "crowdsec_tests")
+	dbconfig := csconfig.DatabaseCfg{
+		Type:   "sqlite",
+		DbPath: filepath.Join(tempDir, "ent"),
+		Flush:  &flushConfig,
+	}
+	cfg := csconfig.LocalApiServerCfg{
+		ListenURI: "127.0.0.1:8080",
+		LogMedia:  "file",
+		LogDir:    tempDir,
+		DbConfig:  &dbconfig,
+	}
+	lvl := log.ErrorLevel
+	expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir)
+	cfg.LogLevel = &lvl
+
+	// Configure logging
+	if err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false); err != nil {
+		t.Fatal(err.Error())
+	}
+	api, err := NewServer(&cfg)
+	if err != nil {
+		t.Fatalf("failed to create api: %s", err)
+	}
+	if api == nil {
+		t.Fatalf("failed to create api: api is nil")
+	}
+
+	w := httptest.NewRecorder()
+	req, _ := http.NewRequest(http.MethodGet, "/test42", nil)
+	req.Header.Set("User-Agent", UserAgent)
+	api.router.ServeHTTP(w, req)
+	assert.Equal(t, 404, w.Code)
+	// wait for the request to happen
+	time.Sleep(500 * time.Millisecond)
+
+	// check file content
+	x, err := os.ReadFile(expectedFile)
+	if err == nil && len(x) > 0 {
+		t.Fatalf("file should be empty, got '%s'", x)
+	}
+
+	os.Remove("./crowdsec.log")
+	os.Remove(expectedFile)
+}
diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go
new file mode 100644
index 0000000..ee8a861
--- /dev/null
+++ b/pkg/apiserver/controllers/controller.go
@@ -0,0 +1,150 @@
+package controllers
+
+import (
+	"context"
+	"net"
+	"net/http"
+
+	"github.com/alexliesenfeld/health"
+	v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1"
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/csplugin"
+	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"github.com/gin-gonic/gin"
+	log "github.com/sirupsen/logrus"
+)
+
+type Controller struct {
+	Ectx          context.Context
+	DBClient      *database.Client
+	Router        *gin.Engine
+	Profiles      []*csconfig.ProfileCfg
+	CAPIChan      chan []*models.Alert
+	PluginChannel chan csplugin.ProfileAlert
+	Log           *log.Logger
+	ConsoleConfig *csconfig.ConsoleConfig
+	TrustedIPs    []net.IPNet
+	HandlerV1     *v1.Controller
+}
+
+func (c *Controller) Init() error {
+	if err := c.NewV1(); err != nil
{ + return err + } + + /* if we have a V2, just add + + if err := c.NewV2(); err != nil { + return err + } + + */ + + return nil +} + +// endpoint for health checking +func serveHealth() http.HandlerFunc { + checker := health.NewChecker( + // just simple up/down status is enough + health.WithDisabledDetails(), + // no caching required + health.WithDisabledCache(), + ) + return health.NewHandler(checker) +} + +func (c *Controller) NewV1() error { + var err error + + v1Config := v1.ControllerV1Config{ + DbClient: c.DBClient, + Ctx: c.Ectx, + ProfilesCfg: c.Profiles, + CapiChan: c.CAPIChan, + PluginChannel: c.PluginChannel, + ConsoleConfig: *c.ConsoleConfig, + TrustedIPs: c.TrustedIPs, + } + + c.HandlerV1, err = v1.New(&v1Config) + if err != nil { + return err + } + c.Router.GET("/health", gin.WrapF(serveHealth())) + c.Router.Use(v1.PrometheusMiddleware()) + c.Router.HandleMethodNotAllowed = true + c.Router.NoRoute(func(ctx *gin.Context) { + ctx.AbortWithStatus(http.StatusNotFound) + }) + c.Router.NoMethod(func(ctx *gin.Context) { + ctx.AbortWithStatus(http.StatusMethodNotAllowed) + }) + + groupV1 := c.Router.Group("/v1") + groupV1.POST("/watchers", c.HandlerV1.CreateMachine) + groupV1.POST("/watchers/login", c.HandlerV1.Middlewares.JWT.Middleware.LoginHandler) + + jwtAuth := groupV1.Group("") + jwtAuth.GET("/refresh_token", c.HandlerV1.Middlewares.JWT.Middleware.RefreshHandler) + jwtAuth.Use(c.HandlerV1.Middlewares.JWT.Middleware.MiddlewareFunc(), v1.PrometheusMachinesMiddleware()) + { + jwtAuth.POST("/alerts", c.HandlerV1.CreateAlert) + jwtAuth.GET("/alerts", c.HandlerV1.FindAlerts) + jwtAuth.HEAD("/alerts", c.HandlerV1.FindAlerts) + jwtAuth.GET("/alerts/:alert_id", c.HandlerV1.FindAlertByID) + jwtAuth.HEAD("/alerts/:alert_id", c.HandlerV1.FindAlertByID) + jwtAuth.DELETE("/alerts/:alert_id", c.HandlerV1.DeleteAlertByID) + jwtAuth.DELETE("/alerts", c.HandlerV1.DeleteAlerts) + jwtAuth.DELETE("/decisions", c.HandlerV1.DeleteDecisions) + jwtAuth.DELETE("/decisions/:decision_id", c.HandlerV1.DeleteDecisionById) + jwtAuth.GET("/heartbeat", c.HandlerV1.HeartBeat) + + } + + apiKeyAuth := groupV1.Group("") + apiKeyAuth.Use(c.HandlerV1.Middlewares.APIKey.MiddlewareFunc(), v1.PrometheusBouncersMiddleware()) + { + apiKeyAuth.GET("/decisions", c.HandlerV1.GetDecision) + apiKeyAuth.HEAD("/decisions", c.HandlerV1.GetDecision) + apiKeyAuth.GET("/decisions/stream", c.HandlerV1.StreamDecision) + apiKeyAuth.HEAD("/decisions/stream", c.HandlerV1.StreamDecision) + } + + return nil +} + +/* +func (c *Controller) NewV2() error { + handlerV2, err := v2.New(c.DBClient, c.Ectx) + if err != nil { + return err + } + + v2 := c.Router.Group("/v2") + v2.POST("/watchers", handlerV2.CreateMachine) + v2.POST("/watchers/login", handlerV2.Middlewares.JWT.Middleware.LoginHandler) + + jwtAuth := v2.Group("") + jwtAuth.GET("/refresh_token", handlerV2.Middlewares.JWT.Middleware.RefreshHandler) + jwtAuth.Use(handlerV2.Middlewares.JWT.Middleware.MiddlewareFunc()) + { + jwtAuth.POST("/alerts", handlerV2.CreateAlert) + jwtAuth.GET("/alerts", handlerV2.FindAlerts) + jwtAuth.DELETE("/alerts", handlerV2.DeleteAlerts) + jwtAuth.DELETE("/decisions", handlerV2.DeleteDecisions) + jwtAuth.DELETE("/decisions/:decision_id", handlerV2.DeleteDecisionById) + } + + apiKeyAuth := v2.Group("") + apiKeyAuth.Use(handlerV2.Middlewares.APIKey.MiddlewareFuncV2()) + { + apiKeyAuth.GET("/decisions", handlerV2.GetDecision) + apiKeyAuth.GET("/decisions/stream", handlerV2.StreamDecision) + } + + return nil +} + +*/ diff --git 
a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go new file mode 100644 index 0000000..1b227ff --- /dev/null +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -0,0 +1,299 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "strconv" + "time" + + jwt "github.com/appleboy/gin-jwt/v2" + + "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" +) + +func FormatOneAlert(alert *ent.Alert) *models.Alert { + var outputAlert models.Alert + startAt := alert.StartedAt.String() + StopAt := alert.StoppedAt.String() + + machineID := "N/A" + if alert.Edges.Owner != nil { + machineID = alert.Edges.Owner.MachineId + } + + outputAlert = models.Alert{ + ID: int64(alert.ID), + MachineID: machineID, + CreatedAt: alert.CreatedAt.Format(time.RFC3339), + Scenario: &alert.Scenario, + ScenarioVersion: &alert.ScenarioVersion, + ScenarioHash: &alert.ScenarioHash, + Message: &alert.Message, + EventsCount: &alert.EventsCount, + StartAt: &startAt, + StopAt: &StopAt, + Capacity: &alert.Capacity, + Leakspeed: &alert.LeakSpeed, + Simulated: &alert.Simulated, + Source: &models.Source{ + Scope: &alert.SourceScope, + Value: &alert.SourceValue, + IP: alert.SourceIp, + Range: alert.SourceRange, + AsNumber: alert.SourceAsNumber, + AsName: alert.SourceAsName, + Cn: alert.SourceCountry, + Latitude: alert.SourceLatitude, + Longitude: alert.SourceLongitude, + }, + } + for _, eventItem := range alert.Edges.Events { + var Metas models.Meta + timestamp := eventItem.Time.String() + if err := json.Unmarshal([]byte(eventItem.Serialized), &Metas); err != nil { + log.Errorf("unable to unmarshall events meta '%s' : %s", eventItem.Serialized, err) + } + outputAlert.Events = append(outputAlert.Events, &models.Event{ + Timestamp: ×tamp, + Meta: Metas, + }) + } + for _, metaItem := range alert.Edges.Metas { + outputAlert.Meta = append(outputAlert.Meta, &models.MetaItems0{ + Key: metaItem.Key, + Value: metaItem.Value, + }) + } + for _, decisionItem := range alert.Edges.Decisions { + duration := decisionItem.Until.Sub(time.Now().UTC()).String() + outputAlert.Decisions = append(outputAlert.Decisions, &models.Decision{ + Duration: &duration, // transform into time.Time ? 
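+			// the DB stores an absolute expiry (Until); the API model wants a
+			// relative duration, hence the subtraction against time.Now() above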
+ Scenario: &decisionItem.Scenario, + Type: &decisionItem.Type, + Scope: &decisionItem.Scope, + Value: &decisionItem.Value, + Origin: &decisionItem.Origin, + Simulated: outputAlert.Simulated, + ID: int64(decisionItem.ID), + }) + } + return &outputAlert +} + +// FormatAlerts formats results from the database to be swagger-model compliant +func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest { + var data models.AddAlertsRequest + for _, alertItem := range result { + data = append(data, FormatOneAlert(alertItem)) + } + return data +} + +func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uint) { + if c.PluginChannel != nil { + RETRY: + for try := 0; try < 3; try++ { + select { + case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}: + log.Debugf("alert sent to Plugin channel") + break RETRY + default: + log.Warningf("Cannot send alert to Plugin channel (try: %d)", try) + time.Sleep(time.Millisecond * 50) + } + } + } +} + +// CreateAlert writes the alerts received in the body to the database +func (c *Controller) CreateAlert(gctx *gin.Context) { + + var input models.AddAlertsRequest + + claims := jwt.ExtractClaims(gctx) + // TBD: use defined rather than hardcoded key to find back owner + machineID := claims["id"].(string) + + if err := gctx.ShouldBindJSON(&input); err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + if err := input.Validate(strfmt.Default); err != nil { + c.HandleDBErrors(gctx, err) + return + } + stopFlush := false + for _, alert := range input { + alert.MachineID = machineID + if len(alert.Decisions) != 0 { + for pIdx, profile := range c.Profiles { + _, matched, err := profile.EvaluateProfile(alert) + if err != nil { + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + if !matched { + continue + } + c.sendAlertToPluginChannel(alert, uint(pIdx)) + if profile.Cfg.OnSuccess == "break" { + break + } + } + decision := alert.Decisions[0] + if decision.Origin != nil && *decision.Origin == "cscli-import" { + stopFlush = true + } + continue + } + + for pIdx, profile := range c.Profiles { + profileDecisions, matched, err := profile.EvaluateProfile(alert) + if err != nil { + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + if !matched { + continue + } + + if len(alert.Decisions) == 0 { // non-manual decision + alert.Decisions = append(alert.Decisions, profileDecisions...)
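+ // decisions produced by the matched profile are attached to the alert itself, so they are persisted with it when the alert is written to the database below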
+ } + profileAlert := *alert + c.sendAlertToPluginChannel(&profileAlert, uint(pIdx)) + if profile.Cfg.OnSuccess == "break" { + break + } + } + } + + if stopFlush { + c.DBClient.CanFlush = false + } + + alerts, err := c.DBClient.CreateAlert(machineID, input) + c.DBClient.CanFlush = true + + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + if c.CAPIChan != nil { + select { + case c.CAPIChan <- input: + log.Debug("alert sent to CAPI channel") + default: + log.Warning("Cannot send alert to Central API channel") + } + } + + gctx.JSON(http.StatusCreated, alerts) +} + +// FindAlerts returns alerts from the database based on the specified filter +func (c *Controller) FindAlerts(gctx *gin.Context) { + result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + data := FormatAlerts(result) + + if gctx.Request.Method == http.MethodHead { + gctx.String(http.StatusOK, "") + return + } + gctx.JSON(http.StatusOK, data) +} + +// FindAlertByID returns the alert associated with the ID +func (c *Controller) FindAlertByID(gctx *gin.Context) { + alertIDStr := gctx.Param("alert_id") + alertID, err := strconv.Atoi(alertIDStr) + if err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) + return + } + result, err := c.DBClient.GetAlertByID(alertID) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + data := FormatOneAlert(result) + + if gctx.Request.Method == http.MethodHead { + gctx.String(http.StatusOK, "") + return + } + gctx.JSON(http.StatusOK, data) +} + +// DeleteAlertByID deletes the alert associated with the ID +func (c *Controller) DeleteAlertByID(gctx *gin.Context) { + var err error + + incomingIP := gctx.ClientIP() + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) + return + } + + alertIDStr := gctx.Param("alert_id") + alertID, err := strconv.Atoi(alertIDStr) + if err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) + return + } + err = c.DBClient.DeleteAlertByID(alertID) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + deleteAlertResp := models.DeleteAlertsResponse{ + NbDeleted: "1", + } + + gctx.JSON(http.StatusOK, deleteAlertResp) +} + + +// DeleteAlerts deletes alerts from the database based on the specified filter +func (c *Controller) DeleteAlerts(gctx *gin.Context) { + incomingIP := gctx.ClientIP() + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) + return + } + var err error + nbDeleted, err := c.DBClient.DeleteAlertWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + deleteAlertsResp := models.DeleteAlertsResponse{ + NbDeleted: strconv.Itoa(nbDeleted), + } + gctx.JSON(http.StatusOK, deleteAlertsResp) +} + +func networksContainIP(networks []net.IPNet, ip string) bool { + parsedIP := net.ParseIP(ip) + for _, network := range networks { + if network.Contains(parsedIP) { + return true + } + } + return false +} diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go new file mode 100644 index 0000000..f29d929 --- /dev/null +++
b/pkg/apiserver/controllers/v1/controller.go @@ -0,0 +1,63 @@ +package v1 + +import ( + "context" + "net" + + //"github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/pkg/errors" +) + +type Controller struct { + Ectx context.Context + DBClient *database.Client + APIKeyHeader string + Middlewares *middlewares.Middlewares + Profiles []*csprofiles.Runtime + CAPIChan chan []*models.Alert + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet +} + +type ControllerV1Config struct { + DbClient *database.Client + Ctx context.Context + ProfilesCfg []*csconfig.ProfileCfg + CapiChan chan []*models.Alert + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet +} + +func New(cfg *ControllerV1Config) (*Controller, error) { + var err error + + profiles, err := csprofiles.NewProfile(cfg.ProfilesCfg) + if err != nil { + return &Controller{}, errors.Wrap(err, "failed to compile profiles") + } + + v1 := &Controller{ + Ectx: cfg.Ctx, + DBClient: cfg.DbClient, + APIKeyHeader: middlewares.APIKeyHeader, + Profiles: profiles, + CAPIChan: cfg.CapiChan, + PluginChannel: cfg.PluginChannel, + ConsoleConfig: cfg.ConsoleConfig, + TrustedIPs: cfg.TrustedIPs, + } + v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) + if err != nil { + return v1, err + } + return v1, nil +} diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go new file mode 100644 index 0000000..02d6c7d --- /dev/null +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -0,0 +1,229 @@ +package v1 + +import ( + "net/http" + "strconv" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +//FormatDecisions formats decisions for the bouncers; when dedup is set, only the first decision per value/scope/type is kept (callers sort decisions longest-first, so this keeps the longest one) +func FormatDecisions(decisions []*ent.Decision, dedup bool) ([]*models.Decision, error) { + var results []*models.Decision + + seen := make(map[string]struct{}) + + for _, dbDecision := range decisions { + if dedup { + key := dbDecision.Value + dbDecision.Scope + dbDecision.Type + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + } + duration := dbDecision.Until.Sub(time.Now().UTC()).String() + decision := models.Decision{ + ID: int64(dbDecision.ID), + Duration: &duration, + Scenario: &dbDecision.Scenario, + Scope: &dbDecision.Scope, + Value: &dbDecision.Value, + Type: &dbDecision.Type, + Origin: &dbDecision.Origin, + } + results = append(results, &decision) + } + return results, nil +} + +func (c *Controller) GetDecision(gctx *gin.Context) { + var err error + var results []*models.Decision + var data []*ent.Decision + + bouncerInfo, err := getBouncerFromContext(gctx) + if err != nil { + gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"}) + return + } + + data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + results, err = FormatDecisions(data, false) + if err != nil { + gctx.JSON(http.StatusInternalServerError, gin.H{"message":
err.Error()}) + return + } + /*let's follow a naive logic: when a bouncer queries /decisions, if the answer is empty, we assume there is no decision for this ip/user/..., but if it's non-empty, it means that there are one or more decisions for this target*/ + if len(results) > 0 { + PrometheusBouncersHasNonEmptyDecision(gctx) + } else { + PrometheusBouncersHasEmptyDecision(gctx) + } + + if gctx.Request.Method == http.MethodHead { + gctx.String(http.StatusOK, "") + return + } + + if time.Now().UTC().Sub(bouncerInfo.LastPull) >= time.Minute { + if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); err != nil { + log.Errorf("failed to update bouncer last pull: %v", err) + } + } + + gctx.JSON(http.StatusOK, results) +} + +func (c *Controller) DeleteDecisionById(gctx *gin.Context) { + var err error + + decisionIDStr := gctx.Param("decision_id") + decisionID, err := strconv.Atoi(decisionIDStr) + if err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "decision_id must be valid integer"}) + return + } + nbDeleted, err := c.DBClient.SoftDeleteDecisionByID(decisionID) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + deleteDecisionResp := models.DeleteDecisionResponse{ + NbDeleted: strconv.Itoa(nbDeleted), + } + + gctx.JSON(http.StatusOK, deleteDecisionResp) +} + +func (c *Controller) DeleteDecisions(gctx *gin.Context) { + var err error + + nbDeleted, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query()) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + deleteDecisionResp := models.DeleteDecisionResponse{ + NbDeleted: nbDeleted, + } + + gctx.JSON(http.StatusOK, deleteDecisionResp) +} + +func (c *Controller) StreamDecision(gctx *gin.Context) { + var data []*ent.Decision + var err error + ret := make(map[string][]*models.Decision) + ret["new"] = []*models.Decision{} + ret["deleted"] = []*models.Decision{} + streamStartTime := time.Now().UTC() + + bouncerInfo, err := getBouncerFromContext(gctx) + if err != nil { + gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"}) + return + } + + filters := gctx.Request.URL.Query() + if _, ok := filters["scopes"]; !ok { + filters["scopes"] = []string{"ip,range"} + } + + dedup := true + if v, ok := filters["dedup"]; ok && v[0] == "false" { + dedup = false + } + + // if the bouncer just started, return all decisions + if val, ok := gctx.Request.URL.Query()["startup"]; ok { + if val[0] == "true" { + data, err = c.DBClient.QueryAllDecisionsWithFilters(filters) + if err != nil { + log.Errorf("failed querying decisions: %v", err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + //data = KeepLongestDecision(data) + ret["new"], err = FormatDecisions(data, dedup) + if err != nil { + log.Errorf("unable to format decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + // getting expired decisions + data, err = c.DBClient.QueryExpiredDecisionsWithFilters(filters) + if err != nil { + log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + ret["deleted"], err = FormatDecisions(data, dedup) + if err != nil { + log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + if err :=
c.DBClient.UpdateBouncerLastPull(streamStartTime, bouncerInfo.ID); err != nil { + log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + if gctx.Request.Method == http.MethodHead { + gctx.String(http.StatusOK, "") + return + } + gctx.JSON(http.StatusOK, ret) + return + } + } + + // getting new decisions + data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(bouncerInfo.LastPull, filters) + if err != nil { + log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + //data = KeepLongestDecision(data) + ret["new"], err = FormatDecisions(data, dedup) + if err != nil { + log.Errorf("unable to format new decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + // getting expired decisions + data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(bouncerInfo.LastPull.Add((-2 * time.Second)), filters) // do we want to give exactly lastPull time ? + if err != nil { + log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + ret["deleted"], err = FormatDecisions(data, dedup) + if err != nil { + log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + if err := c.DBClient.UpdateBouncerLastPull(streamStartTime, bouncerInfo.ID); err != nil { + log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } + + gctx.JSON(http.StatusOK, ret) +} diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go new file mode 100644 index 0000000..5edf0d6 --- /dev/null +++ b/pkg/apiserver/controllers/v1/errors.go @@ -0,0 +1,38 @@ +package v1 + +import ( + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { + switch errors.Cause(err) { + case database.ItemNotFound: + gctx.JSON(http.StatusNotFound, gin.H{"message": err.Error()}) + return + case database.UserExists: + gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()}) + return + case database.HashError: + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + case database.InsertFail: + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + case database.QueryFail: + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + case database.ParseTimeFail: + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + case database.ParseDurationFail: + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + default: + gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) + return + } +} diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go new file mode 100644 index 0000000..bf6fd57 --- /dev/null +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -0,0 +1,21 @@ +package v1 + +import ( + "net/http" + + jwt "github.com/appleboy/gin-jwt/v2" + 
"github.com/gin-gonic/gin" +) + +func (c *Controller) HeartBeat(gctx *gin.Context) { + + claims := jwt.ExtractClaims(gctx) + // TBD: use defined rather than hardcoded key to find back owner + machineID := claims["id"].(string) + + if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { + c.HandleDBErrors(gctx, err) + return + } + gctx.Status(http.StatusOK) +} diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go new file mode 100644 index 0000000..b4f28d9 --- /dev/null +++ b/pkg/apiserver/controllers/v1/machines.go @@ -0,0 +1,31 @@ +package v1 + +import ( + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" +) + +func (c *Controller) CreateMachine(gctx *gin.Context) { + var err error + var input models.WatcherRegistrationRequest + if err = gctx.ShouldBindJSON(&input); err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + if err = input.Validate(strfmt.Default); err != nil { + c.HandleDBErrors(gctx, err) + return + } + + _, err = c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), false, false, types.PasswordAuthType) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + gctx.Status(http.StatusCreated) +} diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go new file mode 100644 index 0000000..0f3bdb6 --- /dev/null +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -0,0 +1,120 @@ +package v1 + +import ( + "time" + + jwt "github.com/appleboy/gin-jwt/v2" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" +) + +/*prometheus*/ +var LapiRouteHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_route_requests_total", + Help: "Number of calls to each route per method.", + }, + []string{"route", "method"}, +) + +/*hits per machine*/ +var LapiMachineHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_machine_requests_total", + Help: "Number of calls to each route per method grouped by machines.", + }, + []string{"machine", "route", "method"}, +) + +/*hits per bouncer*/ +var LapiBouncerHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_bouncer_requests_total", + Help: "Number of calls to each route per method grouped by bouncers.", + }, + []string{"bouncer", "route", "method"}, +) + +/* keep track of the number of calls (per bouncer) that lead to nil/non-nil responses. 
+while it's not exact, it's a good way to know - when you have a misbehaving bouncer - what the rate of ok/ko answers from LAPI is*/ +var LapiNilDecisions = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_decisions_ko_total", + Help: "Number of calls to /decisions that returned nil result.", + }, + []string{"bouncer"}, +) + +/*non-empty answers per bouncer*/ +var LapiNonNilDecisions = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_lapi_decisions_ok_total", + Help: "Number of calls to /decisions that returned non-nil result.", + }, + []string{"bouncer"}, +) + +var LapiResponseTime = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "cs_lapi_request_duration_seconds", + Help: "Response time of LAPI", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1}, + }, + []string{"endpoint", "method"}) + +func PrometheusBouncersHasEmptyDecision(c *gin.Context) { + name, ok := c.Get("BOUNCER_NAME") + if ok { + LapiNilDecisions.With(prometheus.Labels{ + "bouncer": name.(string)}).Inc() + } +} + +func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { + name, ok := c.Get("BOUNCER_NAME") + if ok { + LapiNonNilDecisions.With(prometheus.Labels{ + "bouncer": name.(string)}).Inc() + } +} + +func PrometheusMachinesMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + claims := jwt.ExtractClaims(c) + if claims != nil { + if rawID, ok := claims["id"]; ok { + machineID := rawID.(string) + LapiMachineHits.With(prometheus.Labels{ + "machine": machineID, + "route": c.Request.URL.Path, + "method": c.Request.Method}).Inc() + } + } + c.Next() + } +} + +func PrometheusBouncersMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + name, ok := c.Get("BOUNCER_NAME") + if ok { + LapiBouncerHits.With(prometheus.Labels{ + "bouncer": name.(string), + "route": c.Request.URL.Path, + "method": c.Request.Method}).Inc() + } + c.Next() + } +} + +func PrometheusMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + startTime := time.Now() + LapiRouteHits.With(prometheus.Labels{ + "route": c.Request.URL.Path, + "method": c.Request.Method}).Inc() + c.Next() + elapsed := time.Since(startTime) + LapiResponseTime.With(prometheus.Labels{"method": c.Request.Method, "endpoint": c.Request.URL.Path}).Observe(elapsed.Seconds()) + } +} diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go new file mode 100644 index 0000000..b7c413d --- /dev/null +++ b/pkg/apiserver/controllers/v1/utils.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/gin-gonic/gin" +) + +var ( + bouncerContextKey = "bouncer_info" +) + +func getBouncerFromContext(ctx *gin.Context) (*ent.Bouncer, error) { + bouncerInterface, exist := ctx.Get(bouncerContextKey) + if !exist { + return nil, fmt.Errorf("bouncer not found") + } + + bouncerInfo, ok := bouncerInterface.(*ent.Bouncer) + if !ok { + return nil, fmt.Errorf("bouncer not found") + } + + return bouncerInfo, nil +} diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go new file mode 100644 index 0000000..5f92b1f --- /dev/null +++ b/pkg/apiserver/decisions_test.go @@ -0,0 +1,353 @@ +package apiserver + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + APIKEY = "apikey" + PASSWORD = "password" +) + +func TestDeleteDecisionRange(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_minibulk.json") + + // 
delete by ip wrong + w := lapi.RecordResponse("DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + + // delete by range + + w = lapi.RecordResponse("DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + + // delete by range : ensure it was already deleted + + w = lapi.RecordResponse("DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) +} + +func TestDeleteDecisionFilter(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_minibulk.json") + + // delete by ip wrong + + w := lapi.RecordResponse("DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + + // delete by ip good + + w = lapi.RecordResponse("DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + + // delete by scope/value + + w = lapi.RecordResponse("DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) +} + +func TestDeleteDecisionFilterByScenario(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_minibulk.json") + + // delete by wrong scenario + + w := lapi.RecordResponse("DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + + // delete by scenario good + + w = lapi.RecordResponse("DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) +} + +func TestGetDecisionFilters(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_minibulk.json") + + // Get Decision + + w := lapi.RecordResponse("GET", "/v1/decisions", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err := readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 2, len(decisions)) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[0].Scenario) + assert.Equal(t, "91.121.79.179", *decisions[0].Value) + assert.Equal(t, int64(1), decisions[0].ID) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[1].Scenario) + assert.Equal(t, "91.121.79.178", *decisions[1].Value) + assert.Equal(t, int64(2), decisions[1].ID) + + // Get Decision : type filter + + w = lapi.RecordResponse("GET", "/v1/decisions?type=ban", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err = readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 2, len(decisions)) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[0].Scenario) + assert.Equal(t, "91.121.79.179", *decisions[0].Value) + assert.Equal(t, int64(1), decisions[0].ID) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[1].Scenario) + assert.Equal(t, "91.121.79.178", *decisions[1].Value) + assert.Equal(t, int64(2), decisions[1].ID) + + // assert.Contains(t, w.Body.String(), 
`"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.179"`) + // assert.Contains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) + + // Get Decision : scope/value + + w = lapi.RecordResponse("GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err = readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 1, len(decisions)) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[0].Scenario) + assert.Equal(t, "91.121.79.179", *decisions[0].Value) + assert.Equal(t, int64(1), decisions[0].ID) + + // assert.Contains(t, w.Body.String(), `"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.179"`) + // assert.NotContains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) + + // Get Decision : ip filter + + w = lapi.RecordResponse("GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err = readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 1, len(decisions)) + assert.Equal(t, "crowdsecurity/ssh-bf", *decisions[0].Scenario) + assert.Equal(t, "91.121.79.179", *decisions[0].Value) + assert.Equal(t, int64(1), decisions[0].ID) + + // assert.Contains(t, w.Body.String(), `"id":1,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.179"`) + // assert.NotContains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) + + // Get decision : by range + w = lapi.RecordResponse("GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err = readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 2, len(decisions)) + assert.Contains(t, []string{*decisions[0].Value, *decisions[1].Value}, "91.121.79.179") + assert.Contains(t, []string{*decisions[0].Value, *decisions[1].Value}, "91.121.79.178") + +} + +func TestGetDecision(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + // Get Decision + w := lapi.RecordResponse("GET", "/v1/decisions", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err := readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 3, len(decisions)) + /*decisions get doesn't perform deduplication*/ + assert.Equal(t, "crowdsecurity/test", *decisions[0].Scenario) + assert.Equal(t, "127.0.0.1", *decisions[0].Value) + assert.Equal(t, int64(1), decisions[0].ID) + + assert.Equal(t, "crowdsecurity/test", *decisions[1].Scenario) + assert.Equal(t, "127.0.0.1", *decisions[1].Value) + assert.Equal(t, int64(2), decisions[1].ID) + + assert.Equal(t, "crowdsecurity/test", *decisions[2].Scenario) + assert.Equal(t, "127.0.0.1", *decisions[2].Value) + assert.Equal(t, int64(3), decisions[2].ID) + + // Get Decision with invalid filter. 
It should ignore this filter + w = lapi.RecordResponse("GET", "/v1/decisions?test=test", emptyBody, APIKEY) + assert.Equal(t, 200, w.Code) + decisions, code, err = readDecisionsGetResp(w) + assert.Nil(t, err) + assert.Equal(t, 200, code) + assert.Equal(t, 3, len(decisions)) +} + +func TestDeleteDecisionByID(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + //Have one alert + w := lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err := readDecisionsStreamResp(w) + assert.Equal(t, err, nil) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) + + // Delete alert with Invalid ID + w = lapi.RecordResponse("DELETE", "/v1/decisions/test", emptyBody, PASSWORD) + assert.Equal(t, 400, w.Code) + err_resp, _, err := readDecisionsErrorResp(w) + assert.NoError(t, err) + assert.Equal(t, "decision_id must be valid integer", err_resp["message"]) + + // Delete alert with an ID that does not exist + w = lapi.RecordResponse("DELETE", "/v1/decisions/100", emptyBody, PASSWORD) + assert.Equal(t, 500, w.Code) + err_resp, _, err = readDecisionsErrorResp(w) + assert.NoError(t, err) + assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", err_resp["message"]) + + //Still have one alert + w = lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err = readDecisionsStreamResp(w) + assert.Equal(t, err, nil) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) + + // Delete alert with valid ID + w = lapi.RecordResponse("DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + resp, _, err := readDecisionsDeleteResp(w) + assert.NoError(t, err) + assert.Equal(t, resp.NbDeleted, "1") + + //Still have one decision in the stream (the alert has several decisions on the same target) + w = lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err = readDecisionsStreamResp(w) + assert.Equal(t, err, nil) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) +} + +func TestDeleteDecision(t *testing.T) { + lapi := SetupLAPITest(t) + + // Create Valid Alert + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + // Delete alert with Invalid filter + w := lapi.RecordResponse("DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) + assert.Equal(t, 500, w.Code) + err_resp, _, err := readDecisionsErrorResp(w) + assert.NoError(t, err) + assert.Equal(t, err_resp["message"], "'test' doesn't exist: invalid filter") + + // Delete all alerts + w = lapi.RecordResponse("DELETE", "/v1/decisions", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + resp, _, err := readDecisionsDeleteResp(w) + assert.NoError(t, err) + assert.Equal(t, resp.NbDeleted, "3") +} + +func TestStreamStartDecisionDedup(t *testing.T) { + //Ensure that at stream startup we only get the longest decision + lapi := SetupLAPITest(t) + + // Create Valid Alert : 3 decisions for 127.0.0.1, longest has id=3 + lapi.InsertAlertFromFile("./tests/alert_sample.json") + + // Get Stream, we only get one decision (the longest one) + w := lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err := readDecisionsStreamResp(w) + assert.Equal(t, nil, err) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) + assert.Equal(t, int64(3), 
decisions["new"][0].ID) + assert.Equal(t, "test", *decisions["new"][0].Origin) + assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) + + // id=3 decision is deleted, this won't affect `deleted`, because there are decisions on the same ip + w = lapi.RecordResponse("DELETE", "/v1/decisions/3", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + + // Get Stream, we only get one decision (the longest one, id=2) + w = lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err = readDecisionsStreamResp(w) + assert.Equal(t, nil, err) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) + assert.Equal(t, int64(2), decisions["new"][0].ID) + assert.Equal(t, "test", *decisions["new"][0].Origin) + assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) + + // We delete another decision, yet don't receive it in stream, since there's another decision on same IP + w = lapi.RecordResponse("DELETE", "/v1/decisions/2", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + + // And get the remaining decision (1) + w = lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err = readDecisionsStreamResp(w) + assert.Equal(t, nil, err) + assert.Equal(t, 200, code) + assert.Equal(t, 0, len(decisions["deleted"])) + assert.Equal(t, 1, len(decisions["new"])) + assert.Equal(t, int64(1), decisions["new"][0].ID) + assert.Equal(t, "test", *decisions["new"][0].Origin) + assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) + + // We delete the last decision, we receive the delete order + w = lapi.RecordResponse("DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + assert.Equal(t, 200, w.Code) + + //and now we only get a deleted decision + w = lapi.RecordResponse("GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + decisions, code, err = readDecisionsStreamResp(w) + assert.Equal(t, nil, err) + assert.Equal(t, 200, code) + assert.Equal(t, 1, len(decisions["deleted"])) + assert.Equal(t, int64(1), decisions["deleted"][0].ID) + assert.Equal(t, "test", *decisions["deleted"][0].Origin) + assert.Equal(t, "127.0.0.1", *decisions["deleted"][0].Value) + assert.Equal(t, 0, len(decisions["new"])) +} + +type DecisionCheck struct { + ID int64 + Origin string + Scenario string + Value string + Duration string + Type string +} + +type DecisionTest struct { + TestName string + Method string + Route string + CheckCodeOnly bool + Code int + LenNew int + LenDeleted int + NewChecks []DecisionCheck + DelChecks []DecisionCheck + AuthType string +} diff --git a/pkg/apiserver/heartbeat_test.go b/pkg/apiserver/heartbeat_test.go new file mode 100644 index 0000000..0082f23 --- /dev/null +++ b/pkg/apiserver/heartbeat_test.go @@ -0,0 +1,18 @@ +package apiserver + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHeartBeat(t *testing.T) { + lapi := SetupLAPITest(t) + + w := lapi.RecordResponse(http.MethodGet, "/v1/heartbeat", emptyBody, "password") + assert.Equal(t, 200, w.Code) + + w = lapi.RecordResponse("POST", "/v1/heartbeat", emptyBody, "password") + assert.Equal(t, 405, w.Code) +} diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go new file mode 100644 index 0000000..b589213 --- /dev/null +++ b/pkg/apiserver/jwt_test.go @@ -0,0 +1,95 @@ +package apiserver + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func 
TestLogin(t *testing.T) { + router, config, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + body, err := CreateTestMachine(router) + if err != nil { + log.Fatalln(err.Error()) + } + + // Login with machine not validated yet + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"machine test not validated\"}", w.Body.String()) + + // Login with a machine that does not exist + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test1\", \"password\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"ent: machine not found\"}", w.Body.String()) + + // Login with invalid body + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"missing: invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Login with invalid format + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"input format error\"}", w.Body.String()) + + //Validate machine + err = ValidateMachine("test", config.API.Server.DbConfig) + if err != nil { + log.Fatalln(err.Error()) + } + + // Login with invalid password + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test\", \"password\": \"test1\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 401, w.Code) + assert.Equal(t, "{\"code\":401,\"message\":\"incorrect Username or Password\"}", w.Body.String()) + + // Login with valid machine + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "\"token\"") + assert.Contains(t, w.Body.String(), "\"expire\"") + + // Login with valid machine + scenarios + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("{\"machine_id\": \"test\", \"password\": \"test\", \"scenarios\": [\"crowdsecurity/test\", \"crowdsecurity/test2\"]}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "\"token\"") + assert.Contains(t, w.Body.String(), "\"expire\"") + +} diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go new file mode 100644 index 0000000..63c4e25 --- /dev/null +++ b/pkg/apiserver/machines_test.go @@ -0,0 +1,169 @@ +package apiserver + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestCreateMachine(t *testing.T) { + 
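+ // NewAPITest spins up a local API instance for the test and returns its gin router (the config is unused here)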
router, _, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + // Create machine with invalid format + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader("test")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 400, w.Code) + assert.Equal(t, "{\"message\":\"invalid character 'e' in literal true (expecting 'r')\"}", w.Body.String()) + + // Create machine with invalid input + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader("{\"test\": \"test\"}")) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 500, w.Code) + assert.Equal(t, "{\"message\":\"validation failure list:\\nmachine_id in body is required\\npassword in body is required\"}", w.Body.String()) + + // Create machine + b, err := json.Marshal(MachineTest) + if err != nil { + log.Fatal("unable to marshal MachineTest") + } + body := string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "", w.Body.String()) + +} + +func TestCreateMachineWithForwardedFor(t *testing.T) { + router, config, err := NewAPITestForwardedFor() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + router.TrustedPlatform = "X-Real-IP" + // Create machine + b, err := json.Marshal(MachineTest) + if err != nil { + log.Fatal("unable to marshal MachineTest") + } + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Real-Ip", "1.1.1.1") + router.ServeHTTP(w, req) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "", w.Body.String()) + + ip, err := GetMachineIP(*MachineTest.MachineID, config.API.Server.DbConfig) + if err != nil { + log.Fatalf("Could not get machine IP : %s", err) + } + assert.Equal(t, "1.1.1.1", ip) +} + +func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { + router, config, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + // Create machine + b, err := json.Marshal(MachineTest) + if err != nil { + log.Fatal("unable to marshal MachineTest") + } + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Real-IP", "1.1.1.1") + router.ServeHTTP(w, req) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "", w.Body.String()) + + ip, err := GetMachineIP(*MachineTest.MachineID, config.API.Server.DbConfig) + if err != nil { + log.Fatalf("Could not get machine IP : %s", err) + } + //For some reason, the IP is empty when running tests + //if no forwarded-for headers are present + assert.Equal(t, "", ip) +} + +func TestCreateMachineWithoutForwardedFor(t *testing.T) { + router, config, err := NewAPITestForwardedFor() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + // Create machine + b, err := json.Marshal(MachineTest) + if err != nil { + log.Fatal("unable to marshal MachineTest") + } + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + 
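+ // note: no X-Real-IP or X-Forwarded-For header is set here, so the recorded machine IP is expected to stay empty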
router.ServeHTTP(w, req) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "", w.Body.String()) + + ip, err := GetMachineIP(*MachineTest.MachineID, config.API.Server.DbConfig) + if err != nil { + log.Fatalf("Could not get machine IP : %s", err) + } + //For some reason, the IP is empty when running tests + //if no forwarded-for headers are present + assert.Equal(t, "", ip) +} + +func TestCreateMachineAlreadyExist(t *testing.T) { + router, _, err := NewAPITest() + if err != nil { + log.Fatalf("unable to run local API: %s", err) + } + + body, err := CreateTestMachine(router) + if err != nil { + log.Fatalln(err.Error()) + } + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + router.ServeHTTP(w, req) + + assert.Equal(t, 403, w.Code) + assert.Equal(t, "{\"message\":\"user 'test': user already exist\"}", w.Body.String()) + +} diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go new file mode 100644 index 0000000..503f4d4 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -0,0 +1,226 @@ +package v1 + +import ( + "crypto/rand" + "crypto/sha512" + "encoding/hex" + "fmt" + "net/http" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +var ( + APIKeyHeader = "X-Api-Key" + bouncerContextKey = "bouncer_info" +) + +type APIKey struct { + HeaderName string + DbClient *database.Client + TlsAuth *TLSAuth +} + +func GenerateAPIKey(n int) (string, error) { + bytes := make([]byte, n) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +func NewAPIKey(dbClient *database.Client) *APIKey { + return &APIKey{ + HeaderName: APIKeyHeader, + DbClient: dbClient, + TlsAuth: &TLSAuth{}, + } +} + +func HashSHA512(str string) string { + hashedKey := sha512.New() + hashedKey.Write([]byte(str)) + + hashStr := fmt.Sprintf("%x", hashedKey.Sum(nil)) + + return hashStr +} + +func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { + return func(c *gin.Context) { + var bouncer *ent.Bouncer + var err error + + if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { + if a.TlsAuth == nil { + log.WithField("ip", c.ClientIP()).Error("TLS Auth is not configured but client presented a certificate") + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + validCert, extractedCN, err := a.TlsAuth.ValidateCert(c) + if !validCert { + log.WithField("ip", c.ClientIP()).Errorf("invalid client certificate: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + if err != nil { + log.WithField("ip", c.ClientIP()).Error(err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) + bouncer, err = a.DbClient.SelectBouncerByName(bouncerName) + //This is likely not the proper way, but isNotFound does not seem to work + if err != nil && strings.Contains(err.Error(), "bouncer not found") { + //Because we have a valid cert, automatically create the bouncer in the 
database if it does not exist + //Set a random API key, but it will never be used + apiKey, err := GenerateAPIKey(64) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Errorf("error generating mock api key: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Infof("Creating bouncer %s", bouncerName) + bouncer, err = a.DbClient.CreateBouncer(bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Errorf("creating bouncer db entry : %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } else if err != nil { + //error while selecting bouncer + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Errorf("while selecting bouncers: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } else if bouncer.AuthType != types.TlsAuthType { + //bouncer was found in DB + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Errorf("bouncer isn't allowed to auth by TLS") + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } else { + //API Key Authentication + val, ok := c.Request.Header[APIKeyHeader] + if !ok { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + }).Errorf("API key not found") + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + hashStr := HashSHA512(val[0]) + bouncer, err = a.DbClient.SelectBouncer(hashStr) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + }).Errorf("while fetching bouncer info: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + if bouncer.AuthType != types.ApiKeyAuthType { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + }).Errorf("bouncer %s attempted to login using an API key but it is configured to auth with %s", bouncer.Name, bouncer.AuthType) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } + + if bouncer == nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + }).Errorf("bouncer not found") + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + + //maybe we want to store the whole bouncer object in the context instead, this would avoid another db query + //in StreamDecision + c.Set("BOUNCER_NAME", bouncer.Name) + c.Set("BOUNCER_HASHED_KEY", bouncer.APIKey) + + if bouncer.IPAddress == "" { + err = a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "name": bouncer.Name, + }).Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return + } + } + + if bouncer.IPAddress != c.ClientIP() && bouncer.IPAddress != "" { + log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, c.ClientIP(), bouncer.IPAddress) + err = a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "name": bouncer.Name, + }).Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) 
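+ // a failed IP update is treated as fatal for this request: respond 403 and stop the handler chain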
+ c.Abort() + return + } + } + + useragent := strings.Split(c.Request.UserAgent(), "/") + + if len(useragent) != 2 { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "name": bouncer.Name, + }).Warningf("bad user agent '%s'", c.Request.UserAgent()) + useragent = []string{c.Request.UserAgent(), "N/A"} + } + + if bouncer.Version != useragent[1] || bouncer.Type != useragent[0] { + if err := a.DbClient.UpdateBouncerTypeAndVersion(useragent[0], useragent[1], bouncer.ID); err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "name": bouncer.Name, + }).Errorf("failed to update bouncer version and type: %s", err) + c.JSON(http.StatusForbidden, gin.H{"message": "bad user agent"}) + c.Abort() + return + } + } + + c.Set(bouncerContextKey, bouncer) + + c.Next() + } +} diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go new file mode 100644 index 0000000..9f69f33 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -0,0 +1,286 @@ +package v1 + +import ( + "crypto/rand" + "fmt" + "net/http" + "os" + "strings" + "time" + + jwt "github.com/appleboy/gin-jwt/v2" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/bcrypt" +) + +var identityKey = "id" + +type JWT struct { + Middleware *jwt.GinJWTMiddleware + DbClient *database.Client + TlsAuth *TLSAuth +} + +func PayloadFunc(data interface{}) jwt.MapClaims { + if value, ok := data.(*models.WatcherAuthRequest); ok { + return jwt.MapClaims{ + identityKey: &value.MachineID, + } + } + return jwt.MapClaims{} +} + +func IdentityHandler(c *gin.Context) interface{} { + claims := jwt.ExtractClaims(c) + machineId := claims[identityKey].(string) + return &models.WatcherAuthRequest{ + MachineID: &machineId, + } +} + +func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { + var loginInput models.WatcherAuthRequest + var scenarios string + var err error + var scenariosInput []string + var clientMachine *ent.Machine + var machineID string + + if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { + if j.TlsAuth == nil { + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return nil, errors.New("TLS auth is not configured") + } + validCert, extractedCN, err := j.TlsAuth.ValidateCert(c) + if err != nil { + log.Error(err) + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return nil, errors.Wrap(err, "while trying to validate client cert") + } + if !validCert { + c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) + c.Abort() + return nil, fmt.Errorf("failed cert authentication") + } + + machineID = fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) + clientMachine, err = j.DbClient.Ent.Machine.Query(). + Where(machine.MachineId(machineID)). 
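+ // for TLS auth the machine identity is "CN@clientIP", derived from the validated client certificate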
+ First(j.DbClient.CTX) + if ent.IsNotFound(err) { + //Machine was not found, let's create it + log.Printf("machine %s not found, create it", machineID) + //let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) + pwd, err := GenerateAPIKey(64) + if err != nil { + log.WithFields(log.Fields{ + "ip": c.ClientIP(), + "cn": extractedCN, + }).Errorf("error generating password: %s", err) + return nil, fmt.Errorf("error generating password") + } + password := strfmt.Password(pwd) + clientMachine, err = j.DbClient.CreateMachine(&machineID, &password, "", true, true, types.TlsAuthType) + if err != nil { + return "", errors.Wrapf(err, "while creating machine entry for %s", machineID) + } + } else if err != nil { + return "", errors.Wrapf(err, "while selecting machine entry for %s", machineID) + } else { + if clientMachine.AuthType != types.TlsAuthType { + return "", errors.Errorf("machine %s attempted to auth with TLS cert but it is configured to use %s", machineID, clientMachine.AuthType) + } + machineID = clientMachine.MachineId + loginInput := struct { + Scenarios []string `json:"scenarios"` + }{ + Scenarios: []string{}, + } + err := c.ShouldBindJSON(&loginInput) + if err != nil { + return "", errors.Wrap(err, "missing scenarios list in login request for TLS auth") + } + scenariosInput = loginInput.Scenarios + } + + } else { + //normal auth + + if err := c.ShouldBindJSON(&loginInput); err != nil { + return "", errors.Wrap(err, "missing") + } + if err := loginInput.Validate(strfmt.Default); err != nil { + return "", errors.New("input format error") + } + machineID = *loginInput.MachineID + password := *loginInput.Password + scenariosInput = loginInput.Scenarios + + clientMachine, err = j.DbClient.Ent.Machine.Query(). + Where(machine.MachineId(machineID)). 
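+ // password auth: look the machine up by its declared machine_id; the bcrypt comparison happens below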
+ First(j.DbClient.CTX) + if err != nil { + log.Printf("Error machine login for %s : %+v ", machineID, err) + return nil, err + } + + if clientMachine == nil { + log.Errorf("Nothing for '%s'", machineID) + return nil, jwt.ErrFailedAuthentication + } + + if clientMachine.AuthType != types.PasswordAuthType { + return nil, errors.Errorf("machine %s attempted to auth with password but it is configured to use %s", machineID, clientMachine.AuthType) + } + + if !clientMachine.IsValidated { + return nil, fmt.Errorf("machine %s not validated", machineID) + } + + if err = bcrypt.CompareHashAndPassword([]byte(clientMachine.Password), []byte(password)); err != nil { + return nil, jwt.ErrFailedAuthentication + } + + //end of normal auth + } + + if len(scenariosInput) > 0 { + for _, scenario := range scenariosInput { + if scenarios == "" { + scenarios = scenario + } else { + scenarios += "," + scenario + } + } + err = j.DbClient.UpdateMachineScenarios(scenarios, clientMachine.ID) + if err != nil { + log.Errorf("Failed to update scenarios list for '%s': %s\n", machineID, err) + return nil, jwt.ErrFailedAuthentication + } + } + + if clientMachine.IpAddress == "" { + err = j.DbClient.UpdateMachineIP(c.ClientIP(), clientMachine.ID) + if err != nil { + log.Errorf("Failed to update ip address for '%s': %s\n", machineID, err) + return nil, jwt.ErrFailedAuthentication + } + } + + if clientMachine.IpAddress != c.ClientIP() && clientMachine.IpAddress != "" { + log.Warningf("new IP address detected for machine '%s': %s (old: %s)", clientMachine.MachineId, c.ClientIP(), clientMachine.IpAddress) + err = j.DbClient.UpdateMachineIP(c.ClientIP(), clientMachine.ID) + if err != nil { + log.Errorf("Failed to update ip address for '%s': %s\n", clientMachine.MachineId, err) + return nil, jwt.ErrFailedAuthentication + } + } + + useragent := strings.Split(c.Request.UserAgent(), "/") + if len(useragent) != 2 { + log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), c.ClientIP()) + return nil, jwt.ErrFailedAuthentication + } + + if err := j.DbClient.UpdateMachineVersion(useragent[1], clientMachine.ID); err != nil { + log.Errorf("unable to update machine '%s' version '%s': %s", clientMachine.MachineId, useragent[1], err) + log.Errorf("bad user agent from : %s", c.ClientIP()) + return nil, jwt.ErrFailedAuthentication + } + return &models.WatcherAuthRequest{ + MachineID: &machineID, + }, nil + +} + +func Authorizator(data interface{}, c *gin.Context) bool { + return true +} + +func Unauthorized(c *gin.Context, code int, message string) { + c.JSON(code, gin.H{ + "code": code, + "message": message, + }) +} + +func randomSecret() ([]byte, error) { + size := 64 + secret := make([]byte, size) + + n, err := rand.Read(secret) + if err != nil { + return nil, errors.New("unable to generate a new random seed for JWT generation") + } + + if n != size { + return nil, errors.New("not enough entropy at random seed generation for JWT generation") + } + + return secret, nil +} + +func NewJWT(dbClient *database.Client) (*JWT, error) { + // Get secret from environment variable "SECRET" + var ( + secret []byte + err error + ) + + // Please be aware that brute force HS256 is possible. 
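+ // If CS_LAPI_SECRET is unset, a random 64-byte secret is generated below; a configured value shorter than 64 bytes is rejected.
+ // One way to generate a suitable value (example only, any 64-byte-or-longer string works): export CS_LAPI_SECRET="$(openssl rand -hex 32)"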
+	secretString := os.Getenv("CS_LAPI_SECRET")
+	secret = []byte(secretString)
+
+	switch l := len(secret); {
+	case l == 0:
+		secret, err = randomSecret()
+		if err != nil {
+			return &JWT{}, err
+		}
+	case l < 64:
+		return &JWT{}, errors.New("CS_LAPI_SECRET not strong enough")
+	}
+
+	jwtMiddleware := &JWT{
+		DbClient: dbClient,
+		TlsAuth:  &TLSAuth{},
+	}
+
+	ret, err := jwt.New(&jwt.GinJWTMiddleware{
+		Realm:           "Crowdsec API local",
+		Key:             secret,
+		Timeout:         time.Hour,
+		MaxRefresh:      time.Hour,
+		IdentityKey:     identityKey,
+		PayloadFunc:     PayloadFunc,
+		IdentityHandler: IdentityHandler,
+		Authenticator:   jwtMiddleware.Authenticator,
+		Authorizator:    Authorizator,
+		Unauthorized:    Unauthorized,
+		TokenLookup:     "header: Authorization, query: token, cookie: jwt",
+		TokenHeadName:   "Bearer",
+		TimeFunc:        time.Now,
+	})
+	if err != nil {
+		return &JWT{}, err
+	}
+
+	errInit := ret.MiddlewareInit()
+	if errInit != nil {
+		return &JWT{}, fmt.Errorf("authMiddleware.MiddlewareInit() error: %s", errInit.Error())
+	}
+	jwtMiddleware.Middleware = ret
+
+	return jwtMiddleware, nil
+}
diff --git a/pkg/apiserver/middlewares/v1/middlewares.go b/pkg/apiserver/middlewares/v1/middlewares.go
new file mode 100644
index 0000000..26879bd
--- /dev/null
+++ b/pkg/apiserver/middlewares/v1/middlewares.go
@@ -0,0 +1,22 @@
+package v1
+
+import "github.com/crowdsecurity/crowdsec/pkg/database"
+
+type Middlewares struct {
+	APIKey *APIKey
+	JWT    *JWT
+}
+
+func NewMiddlewares(dbClient *database.Client) (*Middlewares, error) {
+	var err error
+
+	ret := &Middlewares{}
+
+	ret.JWT, err = NewJWT(dbClient)
+	if err != nil {
+		return &Middlewares{}, err
+	}
+
+	ret.APIKey = NewAPIKey(dbClient)
+	return ret, nil
+}
diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go
new file mode 100644
index 0000000..a0b837a
--- /dev/null
+++ b/pkg/apiserver/middlewares/v1/tls_auth.go
@@ -0,0 +1,257 @@
+package v1
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/crypto/ocsp"
+)
+
+type TLSAuth struct {
+	AllowedOUs      []string
+	CrlPath         string
+	revocationCache map[string]cacheEntry
+	cacheExpiration time.Duration
+	logger          *log.Entry
+}
+
+type cacheEntry struct {
+	revoked   bool
+	err       error
+	timestamp time.Time
+}
+
+func (ta *TLSAuth) ocspQuery(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) {
+	req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})
+	if err != nil {
+		ta.logger.Errorf("TLSAuth: error creating OCSP request: %s", err)
+		return nil, err
+	}
+	httpRequest, err := http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req))
+	if err != nil {
+		ta.logger.Error("TLSAuth: cannot create HTTP request for OCSP")
+		return nil, err
+	}
+	ocspURL, err := url.Parse(server)
+	if err != nil {
+		ta.logger.Error("TLSAuth: cannot parse OCSP URL")
+		return nil, err
+	}
+	httpRequest.Header.Add("Content-Type", "application/ocsp-request")
+	httpRequest.Header.Add("Accept", "application/ocsp-response")
+	httpRequest.Header.Add("host", ocspURL.Host)
+	httpClient := &http.Client{}
+	httpResponse, err := httpClient.Do(httpRequest)
+	if err != nil {
+		ta.logger.Error("TLSAuth: cannot send HTTP request to OCSP")
+		return nil, err
+	}
+	defer httpResponse.Body.Close()
+	output, err := io.ReadAll(httpResponse.Body)
+	if err != nil {
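+		// NB: a failure here is not fatal for authentication as a whole; the
+		// caller (isOCSPRevoked) logs the error and tries the next OCSP server.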
ta.logger.Error("TLSAuth: cannot read HTTP response from OCSP") + return nil, err + } + ocspResponse, err := ocsp.ParseResponseForCert(output, cert, issuer) + return ocspResponse, err +} + +func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { + now := time.Now().UTC() + + if cert.NotAfter.UTC().Before(now) { + ta.logger.Errorf("TLSAuth: client certificate is expired (NotAfter: %s)", cert.NotAfter.UTC()) + return true + } + if cert.NotBefore.UTC().After(now) { + ta.logger.Errorf("TLSAuth: client certificate is not yet valid (NotBefore: %s)", cert.NotBefore.UTC()) + return true + } + return false +} + +func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { + if cert.OCSPServer == nil || (cert.OCSPServer != nil && len(cert.OCSPServer) == 0) { + ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") + return false, nil + } + for _, server := range cert.OCSPServer { + ocspResponse, err := ta.ocspQuery(server, cert, issuer) + if err != nil { + ta.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) + continue + } + switch ocspResponse.Status { + case ocsp.Good: + return false, nil + case ocsp.Revoked: + return true, fmt.Errorf("client certificate is revoked by server %s", server) + case ocsp.Unknown: + log.Debugf("unknow OCSP status for server %s", server) + continue + } + } + log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") + return true, nil +} + +func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, error) { + if ta.CrlPath == "" { + ta.logger.Warn("no crl_path, skipping CRL check") + return false, nil + } + crlContent, err := os.ReadFile(ta.CrlPath) + if err != nil { + ta.logger.Warnf("could not read CRL file, skipping check: %s", err) + return false, nil + } + crl, err := x509.ParseCRL(crlContent) + if err != nil { + ta.logger.Warnf("could not parse CRL file, skipping check: %s", err) + return false, nil + } + if crl.HasExpired(time.Now().UTC()) { + ta.logger.Warn("CRL has expired, will still validate the cert against it.") + } + for _, revoked := range crl.TBSCertList.RevokedCertificates { + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + return true, fmt.Errorf("client certificate is revoked by CRL") + } + } + return false, nil +} + +func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { + sn := cert.SerialNumber.String() + if cacheValue, ok := ta.revokationCache[sn]; ok { + if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration { + ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t | %s", sn, cacheValue.revoked, cacheValue.err) + return cacheValue.revoked, cacheValue.err + } else { + ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") + delete(ta.revokationCache, sn) + } + } else { + ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn) + } + revoked, err := ta.isOCSPRevoked(cert, issuer) + if err != nil { + ta.revokationCache[sn] = cacheEntry{ + revoked: revoked, + err: err, + timestamp: time.Now().UTC(), + } + return true, err + } + if revoked { + ta.revokationCache[sn] = cacheEntry{ + revoked: revoked, + err: err, + timestamp: time.Now().UTC(), + } + return true, nil + } + revoked, err = ta.isCRLRevoked(cert) + ta.revokationCache[sn] = cacheEntry{ + revoked: revoked, + err: err, + timestamp: time.Now().UTC(), + } + return revoked, err +} + +func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { 
+	if ta.isExpired(cert) {
+		return true, nil
+	}
+	revoked, err := ta.isRevoked(cert, issuer)
+	if err != nil {
+		//Fail securely: if we can't check the revocation status, consider the cert invalid.
+		//We may change this in the future based on user feedback, but it seems the most sensible thing to do.
+		return true, errors.Wrap(err, "could not check client certificate revocation status")
+	}
+
+	return revoked, nil
+}
+
+func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error {
+	for _, ou := range allowedOus {
+		//disallow empty ou
+		if ou == "" {
+			return fmt.Errorf("empty ou isn't allowed")
+		}
+		//drop & warn on duplicate ou
+		ok := true
+		for _, validOu := range ta.AllowedOUs {
+			if validOu == ou {
+				ta.logger.Warningf("dropping duplicate ou %s", ou)
+				ok = false
+			}
+		}
+		if ok {
+			ta.AllowedOUs = append(ta.AllowedOUs, ou)
+		}
+	}
+	return nil
+}
+
+func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) {
+	//Check cert validity; return true plus the CN when the client cert matches an allowed OU.
+	var clientCert *x509.Certificate
+	if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 {
+		//do not error if it's not TLS or there are no peer certs
+		return false, "", nil
+	}
+
+	if len(c.Request.TLS.VerifiedChains) > 0 {
+		validOU := false
+		clientCert = c.Request.TLS.VerifiedChains[0][0]
+		for _, ou := range clientCert.Subject.OrganizationalUnit {
+			for _, allowedOu := range ta.AllowedOUs {
+				if allowedOu == ou {
+					validOU = true
+					break
+				}
+			}
+		}
+		if !validOU {
+			return false, "", fmt.Errorf("client certificate OU (%v) doesn't match expected OU (%v)",
+				clientCert.Subject.OrganizationalUnit, ta.AllowedOUs)
+		}
+		revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1])
+		if err != nil {
+			ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err)
+			return false, "", errors.Wrap(err, "could not check client certificate revocation status")
+		}
+		if revoked {
+			return false, "", fmt.Errorf("client certificate is revoked")
+		}
+		ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs)
+		return true, clientCert.Subject.CommonName, nil
+	}
+	return false, "", fmt.Errorf("no verified cert in request")
+}
+
+func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) {
+	ta := &TLSAuth{
+		revocationCache: map[string]cacheEntry{},
+		cacheExpiration: cacheExpiration,
+		CrlPath:         crlPath,
+		logger:          logger,
+	}
+	err := ta.SetAllowedOu(allowedOus)
+	if err != nil {
+		return nil, err
+	}
+	return ta, nil
+}
diff --git a/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json b/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json
new file mode 100644
index 0000000..65ea45a
--- /dev/null
+++ b/pkg/apiserver/tests/alertWithInvalidMachineID_sample.json
@@ -0,0 +1,59 @@
+[
+  {
+    "id": 1,
+    "machine_id": "test1",
+    "capacity": 1,
+    "created_at": "2020-10-09T10:00:10Z",
+    "decisions": [
+      {
+        "id": 1,
+        "duration": "1h",
+        "origin": "test",
+        "scenario": "crowdsecurity/test",
+        "scope": "ip",
+        "value": "127.0.0.1",
+        "type": "ban"
+      }
+    ],
+    "Events": [
+      {
+        "meta": [
+          {
+            "key": "test",
+            "value": "test"
+          }
+        ],
+        "timestamp": "2020-10-09T10:00:01Z"
+      }
+    ],
+    "events_count": 1,
+    "labels": [
+      "test"
+    ],
+    "leakspeed": "0.5s",
+    "message": "test",
+    "meta": [
+      {
+        "key": "test",
+        "value": "test"
+      }
+    ],
+    "scenario": "crowdsecurity/test",
+    "scenario_hash": "hashtest",
"scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_bulk.json b/pkg/apiserver/tests/alert_bulk.json new file mode 100644 index 0000000..e9cf4d1 --- /dev/null +++ b/pkg/apiserver/tests/alert_bulk.json @@ -0,0 +1,5362 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": 
"91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.195", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.195" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "ruru" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "ruru" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + }, + { + "key": "IsoCode", + "value": "US" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "ruru" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "ruru" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "ruru" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.4" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "ruru" + }, + { + "key": "service", + "value": "ssh" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": 
"10s", + "message": "Ip 1.2.3.4 performed 'crowdsecurity/ssh-bf' (6 events over 41.41343ms) at 2020-10-26 12:54:48.786745305 +0100 CET m=+118.777986380", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "cn": "US", + "ip": "1.2.3.4", + "latitude": 47.913, + "longitude": -122.3042, + "scope": "Ip", + "value": "1.2.3.4" + }, + "start_at": "2020-10-26T12:54:48.745331839+01:00", + "stop_at": "2020-10-26T12:54:48.786744746+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "target_user", + "value": "rura" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "rura" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "rura" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + }, + { + "key": "target_user", + "value": "rura" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "rura" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "rura" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "1.2.3.6" + }, + { + "key": "IsoCode", + "value": "US" + }, + { + "key": "IsInEU", + "value": "false" + }, + { + "key": "ASNNumber", + "value": "0" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 1.2.3.6 performed 'crowdsecurity/ssh-bf' (6 events over 33.162937ms) at 2020-10-26 12:55:33.554883657 +0100 CET m=+163.546124740", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "cn": "US", + 
"ip": "1.2.3.6", + "latitude": 47.913, + "longitude": -122.3042, + "scope": "Ip", + "value": "1.2.3.6" + }, + "start_at": "2020-10-26T12:55:33.521720645+01:00", + "stop_at": "2020-10-26T12:55:33.554882819+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.194" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.194 performed 
'crowdsecurity/ssh-bf' (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.194", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.194" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.193" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + 
"key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.193 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.193", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.193" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": 
"true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.192" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.192 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.192", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.192" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": 
"2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.191" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.191 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.191", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.191" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH 
SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.190" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.190 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.190", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.190" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + 
"value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.189" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.189 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.189", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.189" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + 
{ + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.188" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.188 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.188", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.188" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": 
"2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.187" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.187 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.187", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.187" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": 
"91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.186" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.186 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.186", 
+ "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.186" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.185" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.185 
performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.185", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.185" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.184" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, 
+ { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.184 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.184", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.184" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + 
"value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.183" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.183 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.183", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.183" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + 
"timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.182" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.182 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.182", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.182" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + 
"value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.181" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.181 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.181", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.181" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": 
"log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.180" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.180 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.180", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.180" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": 
"OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.179", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.179" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + 
"timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.178", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.178" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_duplicate.json b/pkg/apiserver/tests/alert_duplicate.json new file mode 100644 
index 0000000..8377895 --- /dev/null +++ b/pkg/apiserver/tests/alert_duplicate.json @@ -0,0 +1,266 @@ +[ + { + "id": 42, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "1h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.1", + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "scenario": "crowdsecurity/test", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 44, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "3h", + "origin": "another_origin", + "scenario": "crowdsecurity/ssh_bf", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.1", + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/ssh_bf", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 45, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "5h", + "origin": "test", + "scenario": "crowdsecurity/longest", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.1", + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/longest", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 46, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "3h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.2", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.2", + "range": "127.0.0.2/32", + "scope": "ip", + "value": "127.0.0.2" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/test", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 47, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "3h", + "origin": "test", + "scenario": "crowdsecurity/ssh_bf", + "scope": "Ip", + "value": "127.0.0.2", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.2", + "range": "127.0.0.2/32", + "scope": "ip", + "value": "127.0.0.2" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/ssh_bf", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 48, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "1h", + "origin": "test", + "scenario": "crowdsecurity/ssh_bf", + "scope": 
"Ip", + "value": "127.0.0.2", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.2", + "range": "127.0.0.2/32", + "scope": "ip", + "value": "127.0.0.2" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/ssh_bf", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 49, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "2h", + "origin": "another_origin", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.2", + "type": "ban" + } + ], + "source": { + "ip": "127.0.0.2", + "range": "127.0.0.2/32", + "scope": "ip", + "value": "127.0.0.2" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/test", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 50, + "machine_id": "test", + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "duration": "3h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.2", + "type": "captcha" + } + ], + "source": { + "ip": "127.0.0.2", + "range": "127.0.0.2/32", + "scope": "ip", + "value": "127.0.0.2" + }, + "Events": [ + ], + "events_count": 1, + "leakspeed": "0.5s", + "message": "test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "capacity": 1, + "scenario": "crowdsecurity/test", + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_minibulk+simul.json b/pkg/apiserver/tests/alert_minibulk+simul.json new file mode 100644 index 0000000..63969b5 --- /dev/null +++ b/pkg/apiserver/tests/alert_minibulk+simul.json @@ -0,0 +1,548 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": 
"OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": true, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.179", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.179" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": 
[ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.178", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.178" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_minibulk.json b/pkg/apiserver/tests/alert_minibulk.json new file mode 100644 index 0000000..f174158 --- /dev/null +++ b/pkg/apiserver/tests/alert_minibulk.json @@ -0,0 +1,548 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": 
"ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.179" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.179", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.179" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + }, + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { 
+ "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.178" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over 46.375699ms) at 2020-10-26 12:52:58.200237122 +0100 CET m=+8.191478202", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": 
false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.178", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.178" + }, + "start_at": "2020-10-26T12:52:58.153861334+01:00", + "stop_at": "2020-10-26T12:52:58.200236582+01:00" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_sample.json b/pkg/apiserver/tests/alert_sample.json new file mode 100644 index 0000000..3d4f21e --- /dev/null +++ b/pkg/apiserver/tests/alert_sample.json @@ -0,0 +1,77 @@ +[ + { + "id": 42, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "id": 1, + "duration": "1h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + }, + { + "id": 2, + "duration": "2h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + }, + { + "id": 3, + "duration": "3h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "scenario": "crowdsecurity/test", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_ssh-bf.json b/pkg/apiserver/tests/alert_ssh-bf.json new file mode 100644 index 0000000..7dd6dba --- /dev/null +++ b/pkg/apiserver/tests/alert_ssh-bf.json @@ -0,0 +1,275 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + 
}, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over 30.18165ms) at 2020-10-26 09:50:32.055535505 +0100 CET m=+6.235529150", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "91.121.79.195", + "latitude": 50.646, + "longitude": 3.0758, + "range": "91.121.72.0/21", + "scope": "Ip", + "value": "91.121.79.195" + }, + "start_at": "2020-10-26T09:50:32.025353849+01:00", + "stop_at": "2020-10-26T09:50:32.055534398+01:00" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_stream_fixture.json b/pkg/apiserver/tests/alert_stream_fixture.json new file mode 100644 index 0000000..3d1c559 --- /dev/null +++ b/pkg/apiserver/tests/alert_stream_fixture.json @@ -0,0 +1,173 @@ +[ + { + "id": 42, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "id": 1, + "duration": "1h", + "origin": "test1", + "scenario": "crowdsecurity/http_bf", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "scenario": "crowdsecurity/http_bf", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 
2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 43, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "id": 2, + "duration": "1h", + "origin": "test2", + "scenario": "crowdsecurity/ssh_bf", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "scenario": "crowdsecurity/ssh_bf", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + }, + { + "id": 44, + "machine_id": "test", + "capacity": 1, + "created_at": "2020-10-09T10:00:10Z", + "decisions": [ + { + "id": 3, + "duration": "1h", + "origin": "test3", + "scenario": "crowdsecurity/ddos", + "scope": "Ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "scenario": "crowdsecurity/ddos", + "scenario_hash": "hashtest", + "scenario_version": "v1", + "simulated": false, + "source": { + "as_name": "test", + "as_number": "0123456", + "cn": "france", + "ip": "127.0.0.1", + "latitude": 46.227638, + "logitude": 2.213749, + "range": "127.0.0.1/32", + "scope": "ip", + "value": "127.0.0.1" + }, + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] diff --git a/pkg/apiserver/tests/invalidAlert_sample.json b/pkg/apiserver/tests/invalidAlert_sample.json new file mode 100644 index 0000000..aac3131 --- /dev/null +++ b/pkg/apiserver/tests/invalidAlert_sample.json @@ -0,0 +1,43 @@ +[ + { + "id": 1, + "machine_id": "test", + "capacity": 1, + "decisions": [ + { + "id": 1, + "duration": "1h", + "origin": "test", + "scenario": "crowdsecurity/test", + "scope": "ip", + "value": "127.0.0.1", + "type": "ban" + } + ], + "Events": [ + { + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "timestamp": "2020-10-09T10:00:01Z" + } + ], + "events_count": 1, + "labels": [ + "test" + ], + "leakspeed": "0.5s", + "message": "test", + "meta": [ + { + "key": "test", + "value": "test" + } + ], + "start_at": "2020-10-09T10:00:01Z", + "stop_at": "2020-10-09T10:00:05Z" + } +] \ No newline at end of file diff --git a/pkg/apiserver/tests/profiles.yaml b/pkg/apiserver/tests/profiles.yaml new file mode 100644 index 0000000..5727f4e --- /dev/null +++ b/pkg/apiserver/tests/profiles.yaml @@ -0,0 +1,31 @@ + +name: enforce_mfa +#debug: true +filters: + - 'Alert.Remediation == true && Alert.GetScenario() == "crowdsecurity/ssh-enforce-mfa" && Alert.GetScope() == "username"' +decisions: #remediation vs decision + - type: enforce_mfa + scope: "username" + duration: 1h +on_success: continue +--- +name: default_ip_remediation +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && 
Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +on_success: break +--- +#this one won't be reached ^^ +name: default_ip_remediation_2 +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ratatatata + duration: 1h +on_success: break diff --git a/pkg/apiserver/utils.go b/pkg/apiserver/utils.go new file mode 100644 index 0000000..409d79b --- /dev/null +++ b/pkg/apiserver/utils.go @@ -0,0 +1,27 @@ +package apiserver + +import ( + "crypto/tls" + "fmt" + + log "github.com/sirupsen/logrus" +) + +func getTLSAuthType(authType string) (tls.ClientAuthType, error) { + switch authType { + case "NoClientCert": + return tls.NoClientCert, nil + case "RequestClientCert": + log.Warn("RequestClientCert is insecure, please use VerifyClientCertIfGiven or RequireAndVerifyClientCert instead") + return tls.RequestClientCert, nil + case "RequireAnyClientCert": + log.Warn("RequireAnyClientCert is insecure, please use VerifyClientCertIfGiven or RequireAndVerifyClientCert instead") + return tls.RequireAnyClientCert, nil + case "VerifyClientCertIfGiven": + return tls.VerifyClientCertIfGiven, nil + case "RequireAndVerifyClientCert": + return tls.RequireAndVerifyClientCert, nil + default: + return 0, fmt.Errorf("unknown TLS client_verification value: %s", authType) + } +} diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go new file mode 100644 index 0000000..0f94b72 --- /dev/null +++ b/pkg/csconfig/api.go @@ -0,0 +1,250 @@ +package csconfig + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/crowdsec/pkg/yamlpatch" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type APICfg struct { + Client *LocalApiClientCfg `yaml:"client"` + Server *LocalApiServerCfg `yaml:"server"` +} + +type ApiCredentialsCfg struct { + URL string `yaml:"url,omitempty" json:"url,omitempty"` + Login string `yaml:"login,omitempty" json:"login,omitempty"` + Password string `yaml:"password,omitempty" json:"-"` + CACertPath string `yaml:"ca_cert_path,omitempty"` + KeyPath string `yaml:"key_path,omitempty"` + CertPath string `yaml:"cert_path,omitempty"` +} + +/*global api config (for lapi->oapi)*/ +type OnlineApiClientCfg struct { + CredentialsFilePath string `yaml:"credentials_path,omitempty"` //credz will be edited by software, store in diff file + Credentials *ApiCredentialsCfg `yaml:"-"` +} + +/*local api config (for crowdsec/cscli->lapi)*/ +type LocalApiClientCfg struct { + CredentialsFilePath string `yaml:"credentials_path,omitempty"` //credz will be edited by software, store in diff file + Credentials *ApiCredentialsCfg `yaml:"-"` + InsecureSkipVerify *bool `yaml:"insecure_skip_verify"` // check if api certificate is bad or not +} + +func (o *OnlineApiClientCfg) Load() error { + o.Credentials = new(ApiCredentialsCfg) + fcontent, err := os.ReadFile(o.CredentialsFilePath) + if err != nil { + return errors.Wrapf(err, "failed to read api server credentials configuration file '%s'", o.CredentialsFilePath) + } + err = yaml.UnmarshalStrict(fcontent, o.Credentials) + if err != nil { + return errors.Wrapf(err, "failed unmarshaling api server credentials configuration file '%s'", o.CredentialsFilePath) + } + if o.Credentials.Login == "" || o.Credentials.Password == "" || o.Credentials.URL == "" { + log.Warningf("can't load CAPI credentials from '%s' (missing 
field)", o.CredentialsFilePath) + o.Credentials = nil + } + return nil +} + +func (l *LocalApiClientCfg) Load() error { + patcher := yamlpatch.NewPatcher(l.CredentialsFilePath, ".local") + fcontent, err := patcher.MergedPatchContent() + if err != nil { + return err + } + err = yaml.UnmarshalStrict(fcontent, &l.Credentials) + if err != nil { + return errors.Wrapf(err, "failed unmarshaling api client credential configuration file '%s'", l.CredentialsFilePath) + } + if l.Credentials == nil || l.Credentials.URL == "" { + return fmt.Errorf("no credentials or URL found in api client configuration '%s'", l.CredentialsFilePath) + } + + if l.Credentials != nil && l.Credentials.URL != "" { + if !strings.HasSuffix(l.Credentials.URL, "/") { + l.Credentials.URL += "/" + } + } + + if l.Credentials.Login != "" && (l.Credentials.CACertPath != "" || l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") { + return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive") + } + + if l.InsecureSkipVerify == nil { + apiclient.InsecureSkipVerify = false + } else { + apiclient.InsecureSkipVerify = *l.InsecureSkipVerify + } + + if l.Credentials.CACertPath != "" && l.Credentials.CertPath != "" && l.Credentials.KeyPath != "" { + cert, err := tls.LoadX509KeyPair(l.Credentials.CertPath, l.Credentials.KeyPath) + if err != nil { + return errors.Wrapf(err, "failed to load api client certificate") + } + + caCert, err := os.ReadFile(l.Credentials.CACertPath) + if err != nil { + return errors.Wrapf(err, "failed to load cacert") + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + apiclient.Cert = &cert + apiclient.CaCertPool = caCertPool + } + return nil +} + +func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { + trustedIPs := make([]net.IPNet, 0) + for _, ip := range lapiCfg.TrustedIPs { + cidr := toValidCIDR(ip) + _, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + trustedIPs = append(trustedIPs, *ipNet) + } + return trustedIPs, nil +} + +func toValidCIDR(ip string) string { + if strings.Contains(ip, "/") { + return ip + } + + if strings.Contains(ip, ":") { + return ip + "/128" + } + return ip + "/32" +} + +/*local api service configuration*/ +type LocalApiServerCfg struct { + Enable *bool `yaml:"enable"` + ListenURI string `yaml:"listen_uri,omitempty"` //127.0.0.1:8080 + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + ConsoleConfigPath string `yaml:"console_path,omitempty"` + ConsoleConfig *ConsoleConfig `yaml:"-"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` + CompressLogs *bool `yaml:"-"` + LogMaxSize int `yaml:"-"` + LogMaxAge int `yaml:"-"` + LogMaxFiles int `yaml:"-"` + TrustedIPs []string `yaml:"trusted_ips,omitempty"` +} + +type TLSCfg struct { + CertFilePath string `yaml:"cert_file"` + KeyFilePath string `yaml:"key_file"` + ClientVerification string `yaml:"client_verification,omitempty"` + ServerName string `yaml:"server_name"` + CACertPath string `yaml:"ca_cert_path"` + AllowedAgentsOU []string `yaml:"agents_allowed_ou"` + AllowedBouncersOU []string `yaml:"bouncers_allowed_ou"` + CRLPath string `yaml:"crl_path"` + CacheExpiration *time.Duration 
`yaml:"cache_expiration,omitempty"` +} + +func (c *Config) LoadAPIServer() error { + + if c.DisableAPI { + log.Warning("crowdsec local API is disabled from flag") + } + + if c.API.Server == nil { + log.Warning("crowdsec local API is disabled because its configuration is not present") + c.DisableAPI = true + return nil + } + + if c.API.Server.Enable == nil { + // if the option is not present, it is enabled by default + c.API.Server.Enable = types.BoolPtr(true) + } + + if !*c.API.Server.Enable { + log.Warning("crowdsec local API is disabled because 'enable' is set to false") + c.DisableAPI = true + return nil + } + + if c.DisableAPI { + return nil + } + + if err := c.LoadCommon(); err != nil { + return fmt.Errorf("loading common configuration: %s", err) + } + c.API.Server.LogDir = c.Common.LogDir + c.API.Server.LogMedia = c.Common.LogMedia + c.API.Server.CompressLogs = c.Common.CompressLogs + c.API.Server.LogMaxSize = c.Common.LogMaxSize + c.API.Server.LogMaxAge = c.Common.LogMaxAge + c.API.Server.LogMaxFiles = c.Common.LogMaxFiles + if c.API.Server.UseForwardedForHeaders && c.API.Server.TrustedProxies == nil { + c.API.Server.TrustedProxies = &[]string{"0.0.0.0/0"} + } + if c.API.Server.TrustedProxies != nil { + c.API.Server.UseForwardedForHeaders = true + } + if err := c.API.Server.LoadProfiles(); err != nil { + return errors.Wrap(err, "while loading profiles for LAPI") + } + if c.API.Server.ConsoleConfigPath == "" { + c.API.Server.ConsoleConfigPath = DefaultConsoleConfigFilePath + } + if err := c.API.Server.LoadConsoleConfig(); err != nil { + return errors.Wrap(err, "while loading console options") + } + + if c.API.Server.OnlineClient != nil && c.API.Server.OnlineClient.CredentialsFilePath != "" { + if err := c.API.Server.OnlineClient.Load(); err != nil { + return errors.Wrap(err, "loading online client credentials") + } + } + if c.API.Server.OnlineClient == nil || c.API.Server.OnlineClient.Credentials == nil { + log.Printf("push and pull to Central API disabled") + } + if err := c.LoadDBConfig(); err != nil { + return err + } + + return nil +} + +func (c *Config) LoadAPIClient() error { + if c.API == nil || c.API.Client == nil || c.API.Client.CredentialsFilePath == "" || c.DisableAgent { + return fmt.Errorf("no API client section in configuration") + } + + if err := c.API.Client.Load(); err != nil { + return err + } + + return nil +} diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go new file mode 100644 index 0000000..3fb793c --- /dev/null +++ b/pkg/csconfig/api_test.go @@ -0,0 +1,277 @@ +package csconfig + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" +) + +func TestLoadLocalApiClientCfg(t *testing.T) { + True := true + tests := []struct { + name string + Input *LocalApiClientCfg + expectedResult *ApiCredentialsCfg + err string + }{ + { + name: "basic valid configuration", + Input: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + }, + expectedResult: &ApiCredentialsCfg{ + URL: "http://localhost:8080/", + Login: "test", + Password: "testpassword", + }, + }, + { + name: "invalid configuration", + Input: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/bad_lapi-secrets.yaml", + }, + expectedResult: &ApiCredentialsCfg{}, + }, + { + name: "invalid configuration filepath", + Input: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/nonexist_lapi-secrets.yaml", + }, + expectedResult: nil, + }, + { + name: "valid 
configuration with insecure skip verify", + Input: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + InsecureSkipVerify: &True, + }, + expectedResult: &ApiCredentialsCfg{ + URL: "http://localhost:8080/", + Login: "test", + Password: "testpassword", + }, + }, + } + + for idx, test := range tests { + fmt.Printf("TEST '%s'\n", test.name) + err := test.Input.Load() + if err == nil && test.err != "" { + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Credentials) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + + } +} + +func TestLoadOnlineApiClientCfg(t *testing.T) { + tests := []struct { + name string + Input *OnlineApiClientCfg + expectedResult *ApiCredentialsCfg + err string + }{ + { + name: "basic valid configuration", + Input: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/online-api-secrets.yaml", + }, + expectedResult: &ApiCredentialsCfg{ + URL: "http://crowdsec.api", + Login: "test", + Password: "testpassword", + }, + }, + { + name: "invalid configuration", + Input: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/bad_lapi-secrets.yaml", + }, + expectedResult: &ApiCredentialsCfg{}, + err: "failed unmarshaling api server credentials", + }, + { + name: "missing field configuration", + Input: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/bad_online-api-secrets.yaml", + }, + expectedResult: nil, + }, + { + name: "invalid configuration filepath", + Input: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/nonexist_online-api-secrets.yaml", + }, + expectedResult: &ApiCredentialsCfg{}, + err: "failed to read api server credentials", + }, + } + + for idx, test := range tests { + err := test.Input.Load() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Credentials) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + + } +} + +func TestLoadAPIServer(t *testing.T) { + tmpLAPI := &LocalApiServerCfg{ + ProfilesPath: "./tests/profiles.yaml", + } + if err := tmpLAPI.LoadProfiles(); err != nil { + t.Fatalf("loading tmp profiles: %+v", err) + } + + LogDirFullPath, err := filepath.Abs("./tests") + if err != nil { + t.Fatalf(err.Error()) + } + + config := &Config{} + fcontent, err := os.ReadFile("./tests/config.yaml") + if err != nil { + t.Fatalf(err.Error()) + } + configData := os.ExpandEnv(string(fcontent)) + err = yaml.UnmarshalStrict([]byte(configData), &config) + if err != nil { + t.Fatalf(err.Error()) + } + tests := []struct { + name string + Input *Config + expectedResult *LocalApiServerCfg + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + Self: []byte(configData), + API: &APICfg{ + Server: &LocalApiServerCfg{ + ListenURI: "http://crowdsec.api", + OnlineClient: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/online-api-secrets.yaml", + }, + ProfilesPath: "./tests/profiles.yaml", + }, + }, + 
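+          // LoadAPIServer() copies the top-level Common block onto
+          // API.Server (log dir, media and rotation settings) and, via
+          // LoadDBConfig(), points API.Server.DbConfig at the top-level
+          // DbConfig below; the expectedResult further down mirrors
+          // exactly that wiring.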
DbConfig: &DatabaseCfg{ + Type: "sqlite", + DbPath: "./tests/test.db", + }, + Common: &CommonCfg{ + LogDir: "./tests/", + LogMedia: "stdout", + }, + DisableAPI: false, + }, + expectedResult: &LocalApiServerCfg{ + Enable: types.BoolPtr(true), + ListenURI: "http://crowdsec.api", + TLS: nil, + DbConfig: &DatabaseCfg{ + DbPath: "./tests/test.db", + Type: "sqlite", + MaxOpenConns: types.IntPtr(DEFAULT_MAX_OPEN_CONNS), + }, + ConsoleConfigPath: DefaultConfigPath("console.yaml"), + ConsoleConfig: &ConsoleConfig{ + ShareManualDecisions: types.BoolPtr(false), + ShareTaintedScenarios: types.BoolPtr(true), + ShareCustomScenarios: types.BoolPtr(true), + }, + LogDir: LogDirFullPath, + LogMedia: "stdout", + OnlineClient: &OnlineApiClientCfg{ + CredentialsFilePath: "./tests/online-api-secrets.yaml", + Credentials: &ApiCredentialsCfg{ + URL: "http://crowdsec.api", + Login: "test", + Password: "testpassword", + }, + }, + Profiles: tmpLAPI.Profiles, + ProfilesPath: "./tests/profiles.yaml", + UseForwardedForHeaders: false, + }, + err: "", + }, + { + name: "basic invalid configuration", + Input: &Config{ + Self: []byte(configData), + API: &APICfg{ + Server: &LocalApiServerCfg{}, + }, + Common: &CommonCfg{ + LogDir: "./tests/", + LogMedia: "stdout", + }, + DisableAPI: false, + }, + expectedResult: &LocalApiServerCfg{ + Enable: types.BoolPtr(true), + LogDir: LogDirFullPath, + LogMedia: "stdout", + }, + err: "while loading profiles for LAPI", + }, + } + + for idx, test := range tests { + err := test.Input.LoadAPIServer() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.API.Server) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + + } +} diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go new file mode 100644 index 0000000..6add00c --- /dev/null +++ b/pkg/csconfig/common.go @@ -0,0 +1,47 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +/*daemonization/service related stuff*/ +type CommonCfg struct { + Daemonize bool + PidDir string `yaml:"pid_dir,omitempty"` // TODO: This is just for backward compat. 
Remove this later + LogMedia string `yaml:"log_media"` + LogDir string `yaml:"log_dir,omitempty"` //if LogMedia = file + LogLevel *log.Level `yaml:"log_level"` + WorkingDir string `yaml:"working_dir,omitempty"` ///var/run + CompressLogs *bool `yaml:"compress_logs,omitempty"` + LogMaxSize int `yaml:"log_max_size,omitempty"` + LogMaxAge int `yaml:"log_max_age,omitempty"` + LogMaxFiles int `yaml:"log_max_files,omitempty"` + ForceColorLogs bool `yaml:"force_color_logs,omitempty"` +} + +func (c *Config) LoadCommon() error { + var err error + if c.Common == nil { + return fmt.Errorf("no common block provided in configuration file") + } + + var CommonCleanup = []*string{ + &c.Common.LogDir, + &c.Common.WorkingDir, + } + for _, k := range CommonCleanup { + if *k == "" { + continue + } + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrapf(err, "failed to get absolute path of '%s'", *k) + } + } + + return nil +} diff --git a/pkg/csconfig/common_test.go b/pkg/csconfig/common_test.go new file mode 100644 index 0000000..b049a82 --- /dev/null +++ b/pkg/csconfig/common_test.go @@ -0,0 +1,94 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadCommon(t *testing.T) { + pidDirPath := "./tests" + LogDirFullPath, err := filepath.Abs("./tests/log/") + if err != nil { + t.Fatalf(err.Error()) + } + + WorkingDirFullPath, err := filepath.Abs("./tests") + if err != nil { + t.Fatalf(err.Error()) + } + + tests := []struct { + name string + Input *Config + expectedResult *CommonCfg + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + Common: &CommonCfg{ + Daemonize: true, + PidDir: "./tests", + LogMedia: "file", + LogDir: "./tests/log/", + WorkingDir: "./tests/", + }, + }, + expectedResult: &CommonCfg{ + Daemonize: true, + PidDir: pidDirPath, + LogMedia: "file", + LogDir: LogDirFullPath, + WorkingDir: WorkingDirFullPath, + }, + }, + { + name: "empty working dir", + Input: &Config{ + Common: &CommonCfg{ + Daemonize: true, + PidDir: "./tests", + LogMedia: "file", + LogDir: "./tests/log/", + }, + }, + expectedResult: &CommonCfg{ + Daemonize: true, + PidDir: pidDirPath, + LogMedia: "file", + LogDir: LogDirFullPath, + }, + }, + { + name: "no common", + Input: &Config{}, + expectedResult: nil, + }, + } + + for idx, test := range tests { + err := test.Input.LoadCommon() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Common) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + } +} diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go new file mode 100644 index 0000000..68b84b9 --- /dev/null +++ b/pkg/csconfig/config.go @@ -0,0 +1,145 @@ +package csconfig + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/crowdsec/pkg/yamlpatch" +) + +// defaultConfigDir is the base path to all configuration files, to be overridden in the Makefile */ +var defaultConfigDir = "/etc/crowdsec" + +// 
defaultDataDir is the base path to all data files, to be overridden in the Makefile */ +var defaultDataDir = "/var/lib/crowdsec/data/" + +// Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags +type Config struct { + //just a path to ourself :p + FilePath *string `yaml:"-"` + Self []byte `yaml:"-"` + Common *CommonCfg `yaml:"common,omitempty"` + Prometheus *PrometheusCfg `yaml:"prometheus,omitempty"` + Crowdsec *CrowdsecServiceCfg `yaml:"crowdsec_service,omitempty"` + Cscli *CscliCfg `yaml:"cscli,omitempty"` + DbConfig *DatabaseCfg `yaml:"db_config,omitempty"` + API *APICfg `yaml:"api,omitempty"` + ConfigPaths *ConfigurationPaths `yaml:"config_paths,omitempty"` + PluginConfig *PluginCfg `yaml:"plugin_config,omitempty"` + DisableAPI bool `yaml:"-"` + DisableAgent bool `yaml:"-"` + Hub *Hub `yaml:"-"` +} + +func (c *Config) Dump() error { + out, err := yaml.Marshal(c) + if err != nil { + return errors.Wrap(err, "failed marshaling config") + } + fmt.Printf("%s", string(out)) + return nil +} + +func NewConfig(configFile string, disableAgent bool, disableAPI bool) (*Config, error) { + patcher := yamlpatch.NewPatcher(configFile, ".local") + fcontent, err := patcher.MergedPatchContent() + if err != nil { + return nil, err + } + configData := os.ExpandEnv(string(fcontent)) + cfg := Config{ + FilePath: &configFile, + DisableAgent: disableAgent, + DisableAPI: disableAPI, + } + + err = yaml.UnmarshalStrict([]byte(configData), &cfg) + if err != nil { + // this is actually the "merged" yaml + return nil, errors.Wrap(err, configFile) + } + return &cfg, nil +} + +func NewDefaultConfig() *Config { + logLevel := log.InfoLevel + commonCfg := CommonCfg{ + Daemonize: false, + PidDir: "/tmp/", + LogMedia: "stdout", + //LogDir unneeded + LogLevel: &logLevel, + WorkingDir: ".", + } + prometheus := PrometheusCfg{ + Enabled: true, + Level: "full", + } + configPaths := ConfigurationPaths{ + ConfigDir: DefaultConfigPath("."), + DataDir: DefaultDataPath("."), + SimulationFilePath: DefaultConfigPath("simulation.yaml"), + HubDir: DefaultConfigPath("hub"), + HubIndexFile: DefaultConfigPath("hub", ".index.json"), + } + crowdsecCfg := CrowdsecServiceCfg{ + AcquisitionFilePath: DefaultConfigPath("acquis.yaml"), + ParserRoutinesCount: 1, + } + + cscliCfg := CscliCfg{ + Output: "human", + Color: "auto", + } + + apiCfg := APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: DefaultConfigPath("lapi-secrets.yaml"), + }, + Server: &LocalApiServerCfg{ + ListenURI: "127.0.0.1:8080", + UseForwardedForHeaders: false, + OnlineClient: &OnlineApiClientCfg{ + CredentialsFilePath: DefaultConfigPath("config", "online-api-secrets.yaml"), + }, + }, + } + + dbConfig := DatabaseCfg{ + Type: "sqlite", + DbPath: DefaultDataPath("crowdsec.db"), + MaxOpenConns: types.IntPtr(DEFAULT_MAX_OPEN_CONNS), + } + + globalCfg := Config{ + Common: &commonCfg, + Prometheus: &prometheus, + Crowdsec: &crowdsecCfg, + Cscli: &cscliCfg, + API: &apiCfg, + ConfigPaths: &configPaths, + DbConfig: &dbConfig, + } + + return &globalCfg +} + +// DefaultConfigPath returns the default path for a configuration resource +// "elem" parameters are path components relative to the default cfg directory. +func DefaultConfigPath(elem ...string) string { + elem = append([]string{defaultConfigDir}, elem...) + return filepath.Join(elem...) +} + +// DefaultDataPath returns the default path for a data resource. +// "elem" parameters are path components relative to the default data directory. 
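+//
+// For example, assuming the build-time defaults above are left as shipped:
+//
+//	DefaultConfigPath("hub", ".index.json") // "/etc/crowdsec/hub/.index.json"
+//	DefaultDataPath("crowdsec.db")          // "/var/lib/crowdsec/data/crowdsec.db"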
+func DefaultDataPath(elem ...string) string { + elem = append([]string{defaultDataDir}, elem...) + return filepath.Join(elem...) +} diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go new file mode 100644 index 0000000..59be93a --- /dev/null +++ b/pkg/csconfig/config_paths.go @@ -0,0 +1,58 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + + "github.com/pkg/errors" +) + +type ConfigurationPaths struct { + ConfigDir string `yaml:"config_dir"` + DataDir string `yaml:"data_dir,omitempty"` + SimulationFilePath string `yaml:"simulation_path,omitempty"` + HubIndexFile string `yaml:"index_path,omitempty"` //path of the .index.json + HubDir string `yaml:"hub_dir,omitempty"` + PluginDir string `yaml:"plugin_dir,omitempty"` + NotificationDir string `yaml:"notification_dir,omitempty"` +} + +func (c *Config) LoadConfigurationPaths() error { + var err error + if c.ConfigPaths == nil { + return fmt.Errorf("no configuration paths provided") + } + + if c.ConfigPaths.DataDir == "" { + return fmt.Errorf("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") + } + + if c.ConfigPaths.HubDir == "" { + c.ConfigPaths.HubDir = filepath.Clean(c.ConfigPaths.ConfigDir + "/hub") + } + + if c.ConfigPaths.HubIndexFile == "" { + c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") + } + + var configPathsCleanup = []*string{ + &c.ConfigPaths.HubDir, + &c.ConfigPaths.HubIndexFile, + &c.ConfigPaths.ConfigDir, + &c.ConfigPaths.DataDir, + &c.ConfigPaths.SimulationFilePath, + &c.ConfigPaths.PluginDir, + &c.ConfigPaths.NotificationDir, + } + for _, k := range configPathsCleanup { + if *k == "" { + continue + } + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrapf(err, "failed to get absolute path of '%s'", *k) + } + } + + return nil +} diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go new file mode 100644 index 0000000..7c3da49 --- /dev/null +++ b/pkg/csconfig/config_test.go @@ -0,0 +1,46 @@ +package csconfig + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" +) + +func TestNormalLoad(t *testing.T) { + _, err := NewConfig("./tests/config.yaml", false, false) + require.NoError(t, err) + + _, err = NewConfig("./tests/xxx.yaml", false, false) + assert.EqualError(t, err, "while reading yaml file: open ./tests/xxx.yaml: "+cstest.FileNotFoundMessage) + + _, err = NewConfig("./tests/simulation.yaml", false, false) + assert.EqualError(t, err, "./tests/simulation.yaml: yaml: unmarshal errors:\n line 1: field simulation not found in type csconfig.Config") +} + +func TestNewCrowdSecConfig(t *testing.T) { + tests := []struct { + name string + expectedResult *Config + }{ + { + name: "new configuration: basic", + expectedResult: &Config{}, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + result := &Config{} + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestDefaultConfig(t *testing.T) { + x := NewDefaultConfig() + err := x.Dump() + require.NoError(t, err) +} diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go new file mode 100644 index 0000000..e00d014 --- /dev/null +++ b/pkg/csconfig/console.go @@ -0,0 +1,83 @@ +package csconfig + +import ( + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +const ( + SEND_CUSTOM_SCENARIOS = "custom" 
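+	// These identifiers are the user-facing names of the console sharing
+	// options; they map onto the ConsoleConfig fields declared below. A
+	// membership test against CONSOLE_CONFIGS is enough to validate such a
+	// name (sketch of a hypothetical helper, not part of this package):
+	//
+	//	func isConsoleOption(name string) bool {
+	//		for _, k := range CONSOLE_CONFIGS {
+	//			if k == name {
+	//				return true
+	//			}
+	//		}
+	//		return false
+	//	}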
+ SEND_TAINTED_SCENARIOS = "tainted" + SEND_MANUAL_SCENARIOS = "manual" +) + +var CONSOLE_CONFIGS = []string{SEND_CUSTOM_SCENARIOS, SEND_MANUAL_SCENARIOS, SEND_TAINTED_SCENARIOS} + +var DefaultConsoleConfigFilePath = DefaultConfigPath("console.yaml") + +type ConsoleConfig struct { + ShareManualDecisions *bool `yaml:"share_manual_decisions"` + ShareTaintedScenarios *bool `yaml:"share_tainted"` + ShareCustomScenarios *bool `yaml:"share_custom"` +} + +func (c *LocalApiServerCfg) LoadConsoleConfig() error { + c.ConsoleConfig = &ConsoleConfig{} + if _, err := os.Stat(c.ConsoleConfigPath); err != nil && os.IsNotExist(err) { + log.Debugf("no console configuration to load") + c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true) + c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true) + c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false) + return nil + } + + yamlFile, err := os.ReadFile(c.ConsoleConfigPath) + if err != nil { + return fmt.Errorf("reading console config file '%s': %s", c.ConsoleConfigPath, err) + } + err = yaml.Unmarshal(yamlFile, c.ConsoleConfig) + if err != nil { + return fmt.Errorf("unmarshaling console config file '%s': %s", c.ConsoleConfigPath, err) + } + + if c.ConsoleConfig.ShareCustomScenarios == nil { + log.Debugf("no share_custom scenarios found, setting to true") + c.ConsoleConfig.ShareCustomScenarios = types.BoolPtr(true) + } + if c.ConsoleConfig.ShareTaintedScenarios == nil { + log.Debugf("no share_tainted scenarios found, setting to true") + c.ConsoleConfig.ShareTaintedScenarios = types.BoolPtr(true) + } + if c.ConsoleConfig.ShareManualDecisions == nil { + log.Debugf("no share_manual scenarios found, setting to false") + c.ConsoleConfig.ShareManualDecisions = types.BoolPtr(false) + } + log.Debugf("Console configuration '%s' loaded successfully", c.ConsoleConfigPath) + + return nil +} + +func (c *LocalApiServerCfg) DumpConsoleConfig() error { + var out []byte + var err error + + if out, err = yaml.Marshal(c.ConsoleConfig); err != nil { + return errors.Wrapf(err, "while marshaling ConsoleConfig (for %s)", c.ConsoleConfigPath) + } + if c.ConsoleConfigPath == "" { + c.ConsoleConfigPath = DefaultConsoleConfigFilePath + log.Debugf("Empty console_path, defaulting to %s", c.ConsoleConfigPath) + + } + + if err := os.WriteFile(c.ConsoleConfigPath, out, 0600); err != nil { + return errors.Wrapf(err, "while dumping console config to %s", c.ConsoleConfigPath) + } + + return nil +} diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go new file mode 100644 index 0000000..91b5650 --- /dev/null +++ b/pkg/csconfig/crowdsec_service.go @@ -0,0 +1,156 @@ +package csconfig + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// CrowdsecServiceCfg contains the location of parsers/scenarios/... 
and acquisition files +type CrowdsecServiceCfg struct { + Enable *bool `yaml:"enable"` + AcquisitionFilePath string `yaml:"acquisition_path,omitempty"` + AcquisitionDirPath string `yaml:"acquisition_dir,omitempty"` + + AcquisitionFiles []string `yaml:"-"` + ParserRoutinesCount int `yaml:"parser_routines"` + BucketsRoutinesCount int `yaml:"buckets_routines"` + OutputRoutinesCount int `yaml:"output_routines"` + SimulationConfig *SimulationConfig `yaml:"-"` + LintOnly bool `yaml:"-"` // if set to true, exit after loading configs + BucketStateFile string `yaml:"state_input_file,omitempty"` // if we need to unserialize buckets at start + BucketStateDumpDir string `yaml:"state_output_dir,omitempty"` // if we need to unserialize buckets on shutdown + BucketsGCEnabled bool `yaml:"-"` // we need to garbage collect buckets when in forensic mode + + HubDir string `yaml:"-"` + DataDir string `yaml:"-"` + ConfigDir string `yaml:"-"` + HubIndexFile string `yaml:"-"` + SimulationFilePath string `yaml:"-"` +} + +func (c *Config) LoadCrowdsec() error { + var err error + + // Configuration paths are dependency to load crowdsec configuration + if err = c.LoadConfigurationPaths(); err != nil { + return err + } + + if c.Crowdsec == nil { + log.Warning("crowdsec agent is disabled") + c.DisableAgent = true + return nil + } + + if c.Crowdsec.Enable == nil { + // if the option is not present, it is enabled by default + c.Crowdsec.Enable = types.BoolPtr(true) + } + + if !*c.Crowdsec.Enable { + log.Warning("crowdsec agent is disabled") + c.DisableAgent = true + return nil + } + + if c.Crowdsec.AcquisitionFiles == nil { + c.Crowdsec.AcquisitionFiles = []string{} + } + + if c.Crowdsec.AcquisitionFilePath != "" { + log.Debugf("non-empty acquisition_path %s", c.Crowdsec.AcquisitionFilePath) + if _, err = os.Stat(c.Crowdsec.AcquisitionFilePath); err != nil { + return fmt.Errorf("while checking acquisition_path: %w", err) + } + c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles, c.Crowdsec.AcquisitionFilePath) + } + + if c.Crowdsec.AcquisitionDirPath != "" { + c.Crowdsec.AcquisitionDirPath, err = filepath.Abs(c.Crowdsec.AcquisitionDirPath) + if err != nil { + return errors.Wrapf(err, "can't get absolute path of '%s'", c.Crowdsec.AcquisitionDirPath) + } + + var files []string + + files, err = filepath.Glob(c.Crowdsec.AcquisitionDirPath + "/*.yaml") + if err != nil { + return errors.Wrap(err, "while globbing acquis_dir") + } + c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles, files...) + + files, err = filepath.Glob(c.Crowdsec.AcquisitionDirPath + "/*.yml") + if err != nil { + return errors.Wrap(err, "while globbing acquis_dir") + } + c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles, files...) 
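+
+		// Both *.yaml and *.yml files are picked up; an equivalent single
+		// pass over the directory (a sketch, not what the code above does)
+		// would filter on the extension instead of globbing twice:
+		//
+		//	entries, err := os.ReadDir(c.Crowdsec.AcquisitionDirPath)
+		//	if err != nil {
+		//		return err
+		//	}
+		//	for _, e := range entries {
+		//		if ext := filepath.Ext(e.Name()); ext == ".yaml" || ext == ".yml" {
+		//			c.Crowdsec.AcquisitionFiles = append(c.Crowdsec.AcquisitionFiles,
+		//				filepath.Join(c.Crowdsec.AcquisitionDirPath, e.Name()))
+		//		}
+		//	}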
+ } + + if c.Crowdsec.AcquisitionDirPath == "" && c.Crowdsec.AcquisitionFilePath == "" { + log.Warning("no acquisition_path or acquisition_dir specified") + } + + if len(c.Crowdsec.AcquisitionFiles) == 0 { + log.Warning("no acquisition file found") + } + + if err = c.LoadSimulation(); err != nil { + return errors.Wrap(err, "load error (simulation)") + } + + c.Crowdsec.ConfigDir = c.ConfigPaths.ConfigDir + c.Crowdsec.DataDir = c.ConfigPaths.DataDir + c.Crowdsec.HubDir = c.ConfigPaths.HubDir + c.Crowdsec.HubIndexFile = c.ConfigPaths.HubIndexFile + + if c.Crowdsec.ParserRoutinesCount <= 0 { + c.Crowdsec.ParserRoutinesCount = 1 + } + + if c.Crowdsec.BucketsRoutinesCount <= 0 { + c.Crowdsec.BucketsRoutinesCount = 1 + } + + if c.Crowdsec.OutputRoutinesCount <= 0 { + c.Crowdsec.OutputRoutinesCount = 1 + } + + var crowdsecCleanup = []*string{ + &c.Crowdsec.AcquisitionFilePath, + } + + for _, k := range crowdsecCleanup { + if *k == "" { + continue + } + *k, err = filepath.Abs(*k) + if err != nil { + return errors.Wrapf(err, "failed to get absolute path of '%s'", *k) + } + } + + // Convert relative paths to absolute paths + for i, file := range c.Crowdsec.AcquisitionFiles { + f, err := filepath.Abs(file) + if err != nil { + return errors.Wrapf(err, "failed to get absolute path of '%s'", file) + } + c.Crowdsec.AcquisitionFiles[i] = f + } + + if err := c.LoadAPIClient(); err != nil { + return fmt.Errorf("loading api client: %s", err) + } + + if err := c.LoadHub(); err != nil { + return errors.Wrap(err, "while loading hub") + } + + return nil +} diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go new file mode 100644 index 0000000..b835d56 --- /dev/null +++ b/pkg/csconfig/crowdsec_service_test.go @@ -0,0 +1,193 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestLoadCrowdsec(t *testing.T) { + falseBoolPtr := false + acquisFullPath, err := filepath.Abs("./tests/acquis.yaml") + require.NoError(t, err) + + acquisInDirFullPath, err := filepath.Abs("./tests/acquis/acquis.yaml") + require.NoError(t, err) + + acquisDirFullPath, err := filepath.Abs("./tests/acquis") + require.NoError(t, err) + + hubFullPath, err := filepath.Abs("./hub") + require.NoError(t, err) + + dataFullPath, err := filepath.Abs("./data") + require.NoError(t, err) + + configDirFullPath, err := filepath.Abs("./tests") + require.NoError(t, err) + + hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json") + require.NoError(t, err) + + tests := []struct { + name string + input *Config + expectedResult *CrowdsecServiceCfg + expectedErr string + }{ + { + name: "basic valid configuration", + input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + }, + API: &APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + }, + }, + Crowdsec: &CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/acquis.yaml", + SimulationFilePath: "./tests/simulation.yaml", + }, + }, + expectedResult: &CrowdsecServiceCfg{ + Enable: types.BoolPtr(true), + AcquisitionDirPath: "", + AcquisitionFilePath: acquisFullPath, + ConfigDir: configDirFullPath, + DataDir: dataFullPath, + HubDir: hubFullPath, + HubIndexFile: hubIndexFileFullPath, + BucketsRoutinesCount: 1, + ParserRoutinesCount: 1, + OutputRoutinesCount: 1, + AcquisitionFiles: []string{acquisFullPath}, + 
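+				// The paths above are deliberately absolute: LoadCrowdsec()
+				// runs filepath.Abs over acquisition_path, acquisition_dir
+				// and every discovered acquisition file, hence the
+				// filepath.Abs calls at the top of this test.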
SimulationFilePath: "./tests/simulation.yaml", + SimulationConfig: &SimulationConfig{ + Simulation: &falseBoolPtr, + }, + }, + }, + { + name: "basic valid configuration with acquisition dir", + input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + }, + API: &APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + }, + }, + Crowdsec: &CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/acquis.yaml", + AcquisitionDirPath: "./tests/acquis/", + SimulationFilePath: "./tests/simulation.yaml", + }, + }, + expectedResult: &CrowdsecServiceCfg{ + Enable: types.BoolPtr(true), + AcquisitionDirPath: acquisDirFullPath, + AcquisitionFilePath: acquisFullPath, + ConfigDir: configDirFullPath, + HubIndexFile: hubIndexFileFullPath, + DataDir: dataFullPath, + HubDir: hubFullPath, + BucketsRoutinesCount: 1, + ParserRoutinesCount: 1, + OutputRoutinesCount: 1, + AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, + SimulationFilePath: "./tests/simulation.yaml", + SimulationConfig: &SimulationConfig{ + Simulation: &falseBoolPtr, + }, + }, + }, + { + name: "no acquisition file and dir", + input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + }, + API: &APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + }, + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedResult: &CrowdsecServiceCfg{ + Enable: types.BoolPtr(true), + AcquisitionDirPath: "", + AcquisitionFilePath: "", + ConfigDir: configDirFullPath, + HubIndexFile: hubIndexFileFullPath, + DataDir: dataFullPath, + HubDir: hubFullPath, + BucketsRoutinesCount: 1, + ParserRoutinesCount: 1, + OutputRoutinesCount: 1, + AcquisitionFiles: []string{}, + SimulationFilePath: "", + SimulationConfig: &SimulationConfig{ + Simulation: &falseBoolPtr, + }, + }, + }, + { + name: "non existing acquisition file", + input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + }, + API: &APICfg{ + Client: &LocalApiClientCfg{ + CredentialsFilePath: "./tests/lapi-secrets.yaml", + }, + }, + Crowdsec: &CrowdsecServiceCfg{ + AcquisitionFilePath: "./tests/acquis_not_exist.yaml", + }, + }, + expectedErr: cstest.FileNotFoundMessage, + }, + { + name: "agent disabled", + input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + }, + }, + expectedResult: nil, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + fmt.Printf("TEST '%s'\n", tc.name) + err := tc.input.LoadCrowdsec() + cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { + return + } + + require.Equal(t, tc.expectedResult, tc.input.Crowdsec) + }) + } +} diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go new file mode 100644 index 0000000..6b0bf5a --- /dev/null +++ b/pkg/csconfig/cscli.go @@ -0,0 +1,31 @@ +package csconfig + +/*cscli specific config, such as hub directory*/ +type CscliCfg struct { + Output string `yaml:"output,omitempty"` + Color string `yaml:"color,omitempty"` + HubBranch string `yaml:"hub_branch"` + SimulationConfig *SimulationConfig `yaml:"-"` + DbConfig *DatabaseCfg `yaml:"-"` + HubDir string `yaml:"-"` + DataDir string `yaml:"-"` + ConfigDir string `yaml:"-"` + HubIndexFile string `yaml:"-"` + SimulationFilePath string `yaml:"-"` + PrometheusUrl string `yaml:"prometheus_uri"` +} + +func (c *Config) LoadCSCLI() 
error { + if c.Cscli == nil { + c.Cscli = &CscliCfg{} + } + if err := c.LoadConfigurationPaths(); err != nil { + return err + } + c.Cscli.ConfigDir = c.ConfigPaths.ConfigDir + c.Cscli.DataDir = c.ConfigPaths.DataDir + c.Cscli.HubDir = c.ConfigPaths.HubDir + c.Cscli.HubIndexFile = c.ConfigPaths.HubIndexFile + + return nil +} diff --git a/pkg/csconfig/cscli_test.go b/pkg/csconfig/cscli_test.go new file mode 100644 index 0000000..f287afd --- /dev/null +++ b/pkg/csconfig/cscli_test.go @@ -0,0 +1,84 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadCSCLI(t *testing.T) { + hubFullPath, err := filepath.Abs("./hub") + if err != nil { + t.Fatalf(err.Error()) + } + + dataFullPath, err := filepath.Abs("./data") + if err != nil { + t.Fatalf(err.Error()) + } + + configDirFullPath, err := filepath.Abs("./tests") + if err != nil { + t.Fatalf(err.Error()) + } + + hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json") + if err != nil { + t.Fatalf(err.Error()) + } + + tests := []struct { + name string + Input *Config + expectedResult *CscliCfg + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + HubIndexFile: "./hub/.index.json", + }, + }, + expectedResult: &CscliCfg{ + ConfigDir: configDirFullPath, + DataDir: dataFullPath, + HubDir: hubFullPath, + HubIndexFile: hubIndexFileFullPath, + }, + }, + { + name: "no configuration path", + Input: &Config{}, + expectedResult: &CscliCfg{}, + }, + } + + for idx, test := range tests { + err := test.Input.LoadCSCLI() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Cscli) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + } +} diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go new file mode 100644 index 0000000..85b75d7 --- /dev/null +++ b/pkg/csconfig/database.go @@ -0,0 +1,69 @@ +package csconfig + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +var DEFAULT_MAX_OPEN_CONNS = 100 + +type DatabaseCfg struct { + User string `yaml:"user"` + Password string `yaml:"password"` + DbName string `yaml:"db_name"` + Sslmode string `yaml:"sslmode"` + Host string `yaml:"host"` + Port int `yaml:"port"` + DbPath string `yaml:"db_path"` + Type string `yaml:"type"` + Flush *FlushDBCfg `yaml:"flush"` + LogLevel *log.Level `yaml:"log_level"` + MaxOpenConns *int `yaml:"max_open_conns,omitempty"` + UseWal *bool `yaml:"use_wal,omitempty"` +} + +type AuthGCCfg struct { + Cert *string `yaml:"cert,omitempty"` + CertDuration *time.Duration + Api *string `yaml:"api_key,omitempty"` + ApiDuration *time.Duration + LoginPassword *string `yaml:"login_password,omitempty"` + LoginPasswordDuration *time.Duration +} + +type FlushDBCfg struct { + MaxItems *int `yaml:"max_items,omitempty"` + MaxAge *string `yaml:"max_age,omitempty"` + BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` + AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` 
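+
+	// Illustrative YAML for this block (values are examples only):
+	//
+	//	flush:
+	//	  max_items: 5000
+	//	  max_age: 7d
+	//	  agents_autodelete:
+	//	    cert: 60m
+	//	    login_password: 60m
+	//	  bouncers_autodelete:
+	//	    api_key: 60m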
+} + +func (c *Config) LoadDBConfig() error { + if c.DbConfig == nil { + return fmt.Errorf("no database configuration provided") + } + + if c.Cscli != nil { + c.Cscli.DbConfig = c.DbConfig + } + + if c.API != nil && c.API.Server != nil { + c.API.Server.DbConfig = c.DbConfig + } + + if c.DbConfig.MaxOpenConns == nil { + c.DbConfig.MaxOpenConns = types.IntPtr(DEFAULT_MAX_OPEN_CONNS) + } + + if c.DbConfig.Type == "sqlite" { + if c.DbConfig.UseWal == nil { + log.Warning("You are using sqlite without WAL, this can have an impact of performance. If you do not store the database in a network share, set db_config.use_wal to true. Set explicitly to false to disable this warning.") + } + + } + + return nil +} diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go new file mode 100644 index 0000000..b029f38 --- /dev/null +++ b/pkg/csconfig/database_test.go @@ -0,0 +1,65 @@ +package csconfig + +import ( + "fmt" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/stretchr/testify/assert" +) + +func TestLoadDBConfig(t *testing.T) { + tests := []struct { + name string + Input *Config + expectedResult *DatabaseCfg + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + DbConfig: &DatabaseCfg{ + Type: "sqlite", + DbPath: "./tests/test.db", + MaxOpenConns: types.IntPtr(10), + }, + Cscli: &CscliCfg{}, + API: &APICfg{ + Server: &LocalApiServerCfg{}, + }, + }, + expectedResult: &DatabaseCfg{ + Type: "sqlite", + DbPath: "./tests/test.db", + MaxOpenConns: types.IntPtr(10), + }, + }, + { + name: "no configuration path", + Input: &Config{}, + expectedResult: nil, + }, + } + + for idx, test := range tests { + err := test.Input.LoadDBConfig() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + isOk := assert.Equal(t, test.expectedResult, test.Input.DbConfig) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + } +} diff --git a/pkg/csconfig/hub.go b/pkg/csconfig/hub.go new file mode 100644 index 0000000..eb3bd7c --- /dev/null +++ b/pkg/csconfig/hub.go @@ -0,0 +1,24 @@ +package csconfig + +/*cscli specific config, such as hub directory*/ +type Hub struct { + HubDir string `yaml:"-"` + ConfigDir string `yaml:"-"` + HubIndexFile string `yaml:"-"` + DataDir string `yaml:"-"` +} + +func (c *Config) LoadHub() error { + if err := c.LoadConfigurationPaths(); err != nil { + return err + } + + c.Hub = &Hub{ + HubIndexFile: c.ConfigPaths.HubIndexFile, + ConfigDir: c.ConfigPaths.ConfigDir, + HubDir: c.ConfigPaths.HubDir, + DataDir: c.ConfigPaths.DataDir, + } + + return nil +} diff --git a/pkg/csconfig/hub_test.go b/pkg/csconfig/hub_test.go new file mode 100644 index 0000000..136790d --- /dev/null +++ b/pkg/csconfig/hub_test.go @@ -0,0 +1,94 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadHub(t *testing.T) { + hubFullPath, err := filepath.Abs("./hub") + if err != nil { + t.Fatal(err) + } + + dataFullPath, err := filepath.Abs("./data") + if err != nil { + t.Fatal(err) + } + + configDirFullPath, err := filepath.Abs("./tests") + if err != nil { + t.Fatal(err) + } + + 
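+
+	// LoadHub() only mirrors the four ConfigurationPaths entries (config,
+	// data and hub directories plus the hub index file) into a Hub struct
+	// after LoadConfigurationPaths() has made them absolute, which is why
+	// the fixtures below compare against the filepath.Abs values computed
+	// here.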
hubIndexFileFullPath, err := filepath.Abs("./hub/.index.json") + if err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + Input *Config + expectedResult *Hub + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + DataDir: "./data", + HubDir: "./hub", + HubIndexFile: "./hub/.index.json", + }, + }, + expectedResult: &Hub{ + ConfigDir: configDirFullPath, + DataDir: dataFullPath, + HubDir: hubFullPath, + HubIndexFile: hubIndexFileFullPath, + }, + }, + { + name: "no data dir", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + ConfigDir: "./tests", + HubDir: "./hub", + HubIndexFile: "./hub/.index.json", + }, + }, + expectedResult: nil, + }, + { + name: "no configuration path", + Input: &Config{}, + expectedResult: nil, + }, + } + + for idx, test := range tests { + err := test.Input.LoadHub() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + isOk := assert.Equal(t, test.expectedResult, test.Input.Hub) + if !isOk { + t.Fatalf("TEST '%s': NOK", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + } +} diff --git a/pkg/csconfig/plugin_config.go b/pkg/csconfig/plugin_config.go new file mode 100644 index 0000000..3c82ee2 --- /dev/null +++ b/pkg/csconfig/plugin_config.go @@ -0,0 +1,6 @@ +package csconfig + +type PluginCfg struct { + User string + Group string +} diff --git a/pkg/csconfig/profiles.go b/pkg/csconfig/profiles.go new file mode 100644 index 0000000..05072c2 --- /dev/null +++ b/pkg/csconfig/profiles.go @@ -0,0 +1,57 @@ +package csconfig + +import ( + "bytes" + "fmt" + "io" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/yamlpatch" + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +//Profile structure(s) are used by the local API to "decide" what kind of decision should be applied when a scenario with an active remediation has been triggered +type ProfileCfg struct { + Name string `yaml:"name,omitempty"` + Debug *bool `yaml:"debug,omitempty"` + Filters []string `yaml:"filters,omitempty"` //A list of OR'ed expressions. 
the models.Alert object + Decisions []models.Decision `yaml:"decisions,omitempty"` + DurationExpr string `yaml:"duration_expr,omitempty"` + OnSuccess string `yaml:"on_success,omitempty"` //continue or break + OnFailure string `yaml:"on_failure,omitempty"` //continue or break + Notifications []string `yaml:"notifications,omitempty"` +} + +func (c *LocalApiServerCfg) LoadProfiles() error { + if c.ProfilesPath == "" { + return fmt.Errorf("empty profiles path") + } + + patcher := yamlpatch.NewPatcher(c.ProfilesPath, ".local") + fcontent, err := patcher.PrependedPatchContent() + if err != nil { + return err + } + reader := bytes.NewReader(fcontent) + + //process the yaml + dec := yaml.NewDecoder(reader) + dec.SetStrict(true) + for { + t := ProfileCfg{} + err = dec.Decode(&t) + if err != nil { + if err == io.EOF { + break + } + return errors.Wrapf(err, "while decoding %s", c.ProfilesPath) + } + c.Profiles = append(c.Profiles, &t) + } + + if len(c.Profiles) == 0 { + return fmt.Errorf("zero profiles loaded for LAPI") + } + return nil +} diff --git a/pkg/csconfig/prometheus.go b/pkg/csconfig/prometheus.go new file mode 100644 index 0000000..31df851 --- /dev/null +++ b/pkg/csconfig/prometheus.go @@ -0,0 +1,21 @@ +package csconfig + +import "fmt" + +/**/ +type PrometheusCfg struct { + Enabled bool `yaml:"enabled"` + Level string `yaml:"level"` //aggregated|full + ListenAddr string `yaml:"listen_addr"` + ListenPort int `yaml:"listen_port"` +} + +func (c *Config) LoadPrometheus() error { + if c.Cscli != nil && c.Cscli.PrometheusUrl == "" && c.Prometheus != nil { + if c.Prometheus.ListenAddr != "" && c.Prometheus.ListenPort != 0 { + c.Cscli.PrometheusUrl = fmt.Sprintf("http://%s:%d", c.Prometheus.ListenAddr, c.Prometheus.ListenPort) + } + } + + return nil +} diff --git a/pkg/csconfig/prometheus_test.go b/pkg/csconfig/prometheus_test.go new file mode 100644 index 0000000..f7a483d --- /dev/null +++ b/pkg/csconfig/prometheus_test.go @@ -0,0 +1,55 @@ +package csconfig + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLoadPrometheus(t *testing.T) { + + tests := []struct { + name string + Input *Config + expectedResult string + err string + }{ + { + name: "basic valid configuration", + Input: &Config{ + Prometheus: &PrometheusCfg{ + Enabled: true, + Level: "full", + ListenAddr: "127.0.0.1", + ListenPort: 6060, + }, + Cscli: &CscliCfg{}, + }, + expectedResult: "http://127.0.0.1:6060", + }, + } + + for idx, test := range tests { + err := test.Input.LoadPrometheus() + if err == nil && test.err != "" { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected error, didn't get it", idx, len(tests)) + } else if test.err != "" { + if !strings.HasPrefix(fmt.Sprintf("%s", err), test.err) { + fmt.Printf("TEST '%s': NOK\n", test.name) + t.Fatalf("%d/%d expected '%s' got '%s'", idx, len(tests), + test.err, + fmt.Sprintf("%s", err)) + } + } + + isOk := assert.Equal(t, test.expectedResult, test.Input.Cscli.PrometheusUrl) + if !isOk { + t.Fatalf("test '%s' failed\n", test.name) + } else { + fmt.Printf("TEST '%s': OK\n", test.name) + } + } +} diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go new file mode 100644 index 0000000..69c520c --- /dev/null +++ b/pkg/csconfig/simulation.go @@ -0,0 +1,60 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/yamlpatch" + "gopkg.in/yaml.v2" +) + +type SimulationConfig struct { + Simulation *bool `yaml:"simulation"` + Exclusions []string 
`yaml:"exclusions,omitempty"` +} + +func (s *SimulationConfig) IsSimulated(scenario string) bool { + var simulated bool + + if s.Simulation != nil && *s.Simulation { + simulated = true + } + for _, excluded := range s.Exclusions { + if excluded == scenario { + simulated = !simulated + break + } + } + return simulated +} + +func (c *Config) LoadSimulation() error { + + if err := c.LoadConfigurationPaths(); err != nil { + return err + } + + simCfg := SimulationConfig{} + if c.ConfigPaths.SimulationFilePath == "" { + c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") + } + + patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") + rcfg, err := patcher.MergedPatchContent() + if err != nil { + return err + } + if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil { + return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err) + } + if simCfg.Simulation == nil { + simCfg.Simulation = new(bool) + } + if c.Crowdsec != nil { + c.Crowdsec.SimulationConfig = &simCfg + } + if c.Cscli != nil { + c.Cscli.SimulationConfig = &simCfg + } + return nil +} diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go new file mode 100644 index 0000000..5256806 --- /dev/null +++ b/pkg/csconfig/simulation_test.go @@ -0,0 +1,140 @@ +package csconfig + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/cstest" +) + +func TestSimulationLoading(t *testing.T) { + testXXFullPath, err := filepath.Abs("./tests/xxx.yaml") + require.NoError(t, err) + + badYamlFullPath, err := filepath.Abs("./tests/config.yaml") + require.NoError(t, err) + + tests := []struct { + name string + Input *Config + expectedResult *SimulationConfig + expectedErr string + }{ + { + name: "basic valid simulation", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/simulation.yaml", + DataDir: "./data", + }, + Crowdsec: &CrowdsecServiceCfg{}, + Cscli: &CscliCfg{}, + }, + expectedResult: &SimulationConfig{Simulation: new(bool)}, + }, + { + name: "basic nil config", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "", + DataDir: "./data", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedErr: "simulation.yaml: "+cstest.FileNotFoundMessage, + }, + { + name: "basic bad file name", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/xxx.yaml", + DataDir: "./data", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedErr: fmt.Sprintf("while reading yaml file: open %s: %s", testXXFullPath, cstest.FileNotFoundMessage), + }, + { + name: "basic bad file content", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/config.yaml", + DataDir: "./data", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedErr: fmt.Sprintf("while unmarshaling simulation file '%s' : yaml: unmarshal errors", badYamlFullPath), + }, + { + name: "basic bad file content", + Input: &Config{ + ConfigPaths: &ConfigurationPaths{ + SimulationFilePath: "./tests/config.yaml", + DataDir: "./data", + }, + Crowdsec: &CrowdsecServiceCfg{}, + }, + expectedErr: fmt.Sprintf("while unmarshaling simulation file '%s' : yaml: unmarshal errors", badYamlFullPath), + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + err := tc.Input.LoadSimulation() + cstest.RequireErrorContains(t, err, 
tc.expectedErr) + + assert.Equal(t, tc.expectedResult, tc.Input.Crowdsec.SimulationConfig) + }) + } +} + +func TestIsSimulated(t *testing.T) { + simCfgOff := &SimulationConfig{ + Simulation: new(bool), + Exclusions: []string{"test"}, + } + + simCfgOn := &SimulationConfig{ + Simulation: new(bool), + Exclusions: []string{"test"}, + } + *simCfgOn.Simulation = true + + tests := []struct { + name string + SimulationConfig *SimulationConfig + Input string + expectedResult bool + }{ + { + name: "No simulation except (in exclusion)", + SimulationConfig: simCfgOff, + Input: "test", + expectedResult: true, + }, + { + name: "All simulation (not in exclusion)", + SimulationConfig: simCfgOn, + Input: "toto", + expectedResult: true, + }, + { + name: "All simulation (in exclusion)", + SimulationConfig: simCfgOn, + Input: "test", + expectedResult: false, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + IsSimulated := tc.SimulationConfig.IsSimulated(tc.Input) + require.Equal(t, tc.expectedResult, IsSimulated) + }) + } +} diff --git a/pkg/csconfig/tests/acquis.yaml b/pkg/csconfig/tests/acquis.yaml new file mode 100644 index 0000000..e69de29 diff --git a/pkg/csconfig/tests/acquis/acquis.yaml b/pkg/csconfig/tests/acquis/acquis.yaml new file mode 100644 index 0000000..e69de29 diff --git a/pkg/csconfig/tests/bad_lapi-secrets.yaml b/pkg/csconfig/tests/bad_lapi-secrets.yaml new file mode 100644 index 0000000..b7d536d --- /dev/null +++ b/pkg/csconfig/tests/bad_lapi-secrets.yaml @@ -0,0 +1 @@ +unknown_key: test \ No newline at end of file diff --git a/pkg/csconfig/tests/bad_online-api-secrets.yaml b/pkg/csconfig/tests/bad_online-api-secrets.yaml new file mode 100644 index 0000000..baeff99 --- /dev/null +++ b/pkg/csconfig/tests/bad_online-api-secrets.yaml @@ -0,0 +1,3 @@ +login: test +password: +url: \ No newline at end of file diff --git a/pkg/csconfig/tests/config.yaml b/pkg/csconfig/tests/config.yaml new file mode 100644 index 0000000..ffa993b --- /dev/null +++ b/pkg/csconfig/tests/config.yaml @@ -0,0 +1,38 @@ +common: + daemonize: false + pid_dir: /tmp/ + log_media: stdout + log_level: info + working_dir: . +prometheus: + enabled: true + level: full +crowdsec_service: + acquisition_path: ./tests/acquis.yaml + parser_routines: 1 +cscli: + output: human +db_config: + user: "" + password: "" + db_name: "" + host: "" + port: 0 + db_path: ./crowdsec.db + type: sqlite +api: + client: + credentials_path: ./tests/lapi-secrets.yaml + server: + profiles_path: ./tests/profiles.yaml + listen_uri: 127.0.0.1:8080 + tls: null + online_client: + credentials_path: ./tests/online-api-secrets.yaml +config_paths: + config_dir: ./tests + data_dir: . 
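+# the relative paths in this section are resolved to absolute ones by
+# LoadConfigurationPaths() when this test configuration is loaded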
+ simulation_path: ./tests/simulation.yaml + index_path: ./tests/hub/.index.json + hub_dir: ./tests/hub + diff --git a/pkg/csconfig/tests/lapi-secrets.yaml b/pkg/csconfig/tests/lapi-secrets.yaml new file mode 100644 index 0000000..22c00b7 --- /dev/null +++ b/pkg/csconfig/tests/lapi-secrets.yaml @@ -0,0 +1,3 @@ +url: http://localhost:8080 +login: test +password: testpassword \ No newline at end of file diff --git a/pkg/csconfig/tests/online-api-secrets.yaml b/pkg/csconfig/tests/online-api-secrets.yaml new file mode 100644 index 0000000..06bcdbb --- /dev/null +++ b/pkg/csconfig/tests/online-api-secrets.yaml @@ -0,0 +1,3 @@ +url: http://crowdsec.api +login: test +password: testpassword \ No newline at end of file diff --git a/pkg/csconfig/tests/profiles.yaml b/pkg/csconfig/tests/profiles.yaml new file mode 100644 index 0000000..8468b24 --- /dev/null +++ b/pkg/csconfig/tests/profiles.yaml @@ -0,0 +1,41 @@ + +name: enforce_mfa +#debug: true +filters: + - 'Alert.Remediation == true && Alert.GetScenario() == "crowdsecurity/ssh-enforce-mfa" && Alert.GetScope() == "username"' +decisions: #remediation vs decision + - type: enforce_mfa + scope: "username" + duration: 1h +on_success: continue +--- +name: default_ip_remediation +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +on_success: break +--- +#this one won't be reached ^^ +name: default_ip_remediation_2 +#debug: true +filters: +# try types.Ip here :) + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ratatatata + duration: 1h +on_success: break +--- +name: duration_expression +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Ip" +decisions: + - type: ban + duration: 1h +duration_expr: sprintf('%dh', 4*4) +on_success: break diff --git a/pkg/csconfig/tests/simulation.yaml b/pkg/csconfig/tests/simulation.yaml new file mode 100644 index 0000000..e9c6899 --- /dev/null +++ b/pkg/csconfig/tests/simulation.yaml @@ -0,0 +1,4 @@ +simulation: off +# exclusions: +# - crowdsecurity/ssh-bf + \ No newline at end of file diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go new file mode 100644 index 0000000..b06554a --- /dev/null +++ b/pkg/csplugin/broker.go @@ -0,0 +1,423 @@ +package csplugin + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "text/template" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/google/uuid" + plugin "github.com/hashicorp/go-plugin" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" +) + +var pluginMutex sync.Mutex + +const ( + PluginProtocolVersion uint = 1 + CrowdsecPluginKey string = "CROWDSEC_PLUGIN_KEY" +) + +// The broker is responsible for running the plugins and dispatching events +// It receives all the events from the main process and stacks them up +// It is as well notified by the watcher when it needs to deliver events to plugins (based on time or count threshold) +type PluginBroker struct { + PluginChannel chan ProfileAlert + alertsByPluginName map[string][]*models.Alert + profileConfigs []*csconfig.ProfileCfg + pluginConfigByName map[string]PluginConfig + pluginMap map[string]plugin.Plugin + notificationConfigsByPluginType map[string][][]byte // "slack" -> 
[]{config1, config2} + notificationPluginByName map[string]Notifier + watcher PluginWatcher + pluginKillMethods []func() + pluginProcConfig *csconfig.PluginCfg + pluginsTypesToDispatch map[string]struct{} +} + +// holder to determine where to dispatch config and how to format messages +type PluginConfig struct { + Type string `yaml:"type"` + Name string `yaml:"name"` + GroupWait time.Duration `yaml:"group_wait,omitempty"` + GroupThreshold int `yaml:"group_threshold,omitempty"` + MaxRetry int `yaml:"max_retry,omitempty"` + TimeOut time.Duration `yaml:"timeout,omitempty"` + + Format string `yaml:"format,omitempty"` // specific to notification plugins + + Config map[string]interface{} `yaml:",inline"` //to keep the plugin-specific config + +} + +type ProfileAlert struct { + ProfileID uint + Alert *models.Alert +} + +func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { + pb.PluginChannel = make(chan ProfileAlert) + pb.notificationConfigsByPluginType = make(map[string][][]byte) + pb.notificationPluginByName = make(map[string]Notifier) + pb.pluginMap = make(map[string]plugin.Plugin) + pb.pluginConfigByName = make(map[string]PluginConfig) + pb.alertsByPluginName = make(map[string][]*models.Alert) + pb.profileConfigs = profileConfigs + pb.pluginProcConfig = pluginCfg + pb.pluginsTypesToDispatch = make(map[string]struct{}) + if err := pb.loadConfig(configPaths.NotificationDir); err != nil { + return errors.Wrap(err, "while loading plugin config") + } + if err := pb.loadPlugins(configPaths.PluginDir); err != nil { + return errors.Wrap(err, "while loading plugin") + } + pb.watcher = PluginWatcher{} + pb.watcher.Init(pb.pluginConfigByName, pb.alertsByPluginName) + return nil + +} + +func (pb *PluginBroker) Kill() { + for _, kill := range pb.pluginKillMethods { + kill() + } +} + +func (pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { + //we get signaled via the channel when notifications need to be delivered to plugin (via the watcher) + pb.watcher.Start(&tomb.Tomb{}) +loop: + for { + select { + case profileAlert := <-pb.PluginChannel: + pb.addProfileAlert(profileAlert) + + case pluginName := <-pb.watcher.PluginEvents: + // this can be ran in goroutine, but then locks will be needed + pluginMutex.Lock() + log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName) + tmpAlerts := pb.alertsByPluginName[pluginName] + pb.alertsByPluginName[pluginName] = make([]*models.Alert, 0) + pluginMutex.Unlock() + go func() { + if err := pb.pushNotificationsToPlugin(pluginName, tmpAlerts); err != nil { + log.WithField("plugin:", pluginName).Error(err) + } + }() + + case <-pluginTomb.Dying(): + log.Infof("pluginTomb dying") + pb.watcher.tomb.Kill(errors.New("Terminating")) + for { + select { + case <-pb.watcher.tomb.Dead(): + log.Info("killing all plugins") + pb.Kill() + break loop + case pluginName := <-pb.watcher.PluginEvents: + // this can be ran in goroutine, but then locks will be needed + pluginMutex.Lock() + log.Tracef("going to deliver %d alerts to plugin %s", len(pb.alertsByPluginName[pluginName]), pluginName) + tmpAlerts := pb.alertsByPluginName[pluginName] + pb.alertsByPluginName[pluginName] = make([]*models.Alert, 0) + pluginMutex.Unlock() + + if err := pb.pushNotificationsToPlugin(pluginName, tmpAlerts); err != nil { + log.WithField("plugin:", pluginName).Error(err) + } + } + } + } + } +} + +func (pb *PluginBroker) addProfileAlert(profileAlert ProfileAlert) { + for _, 
pluginName := range pb.profileConfigs[profileAlert.ProfileID].Notifications { + if _, ok := pb.pluginConfigByName[pluginName]; !ok { + log.Errorf("plugin %s is not configured properly.", pluginName) + continue + } + pluginMutex.Lock() + pb.alertsByPluginName[pluginName] = append(pb.alertsByPluginName[pluginName], profileAlert.Alert) + pluginMutex.Unlock() + pb.watcher.Inserts <- pluginName + } +} +func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { + for _, profileCfg := range pb.profileConfigs { + for _, name := range profileCfg.Notifications { + if pluginName == name { + return true + } + } + } + return false +} +func (pb *PluginBroker) loadConfig(path string) error { + files, err := listFilesAtPath(path) + if err != nil { + return err + } + for _, configFilePath := range files { + if !strings.HasSuffix(configFilePath, ".yaml") && !strings.HasSuffix(configFilePath, ".yml") { + continue + } + + pluginConfigs, err := ParsePluginConfigFile(configFilePath) + if err != nil { + return err + } + for _, pluginConfig := range pluginConfigs { + if !pb.profilesContainPlugin(pluginConfig.Name) { + continue + } + setRequiredFields(&pluginConfig) + if _, ok := pb.pluginConfigByName[pluginConfig.Name]; ok { + log.Warnf("several configs for notification %s found ", pluginConfig.Name) + } + pb.pluginConfigByName[pluginConfig.Name] = pluginConfig + } + } + err = pb.verifyPluginConfigsWithProfile() + return err +} + +// checks whether every notification in profile has it's own config file +func (pb *PluginBroker) verifyPluginConfigsWithProfile() error { + for _, profileCfg := range pb.profileConfigs { + for _, pluginName := range profileCfg.Notifications { + if _, ok := pb.pluginConfigByName[pluginName]; !ok { + return fmt.Errorf("config file for plugin %s not found", pluginName) + } + pb.pluginsTypesToDispatch[pb.pluginConfigByName[pluginName].Type] = struct{}{} + } + } + return nil +} + +// check whether each plugin in profile has it's own binary +func (pb *PluginBroker) verifyPluginBinaryWithProfile() error { + for _, profileCfg := range pb.profileConfigs { + for _, pluginName := range profileCfg.Notifications { + if _, ok := pb.notificationPluginByName[pluginName]; !ok { + return fmt.Errorf("binary for plugin %s not found", pluginName) + } + } + } + return nil +} + +func (pb *PluginBroker) loadPlugins(path string) error { + binaryPaths, err := listFilesAtPath(path) + if err != nil { + return err + } + for _, binaryPath := range binaryPaths { + if err := pluginIsValid(binaryPath); err != nil { + return err + } + pType, pSubtype, err := getPluginTypeAndSubtypeFromPath(binaryPath) // eg pType="notification" , pSubtype="slack" + if err != nil { + return err + } + if pType != "notification" { + continue + } + + if _, ok := pb.pluginsTypesToDispatch[pSubtype]; !ok { + continue + } + + pluginClient, err := pb.loadNotificationPlugin(pSubtype, binaryPath) + if err != nil { + return err + } + for _, pc := range pb.pluginConfigByName { + if pc.Type != pSubtype { + continue + } + + data, err := yaml.Marshal(pc) + if err != nil { + return err + } + data = []byte(os.ExpandEnv(string(data))) + _, err = pluginClient.Configure(context.Background(), &protobufs.Config{Config: data}) + if err != nil { + return errors.Wrapf(err, "while configuring %s", pc.Name) + } + log.Infof("registered plugin %s", pc.Name) + pb.notificationPluginByName[pc.Name] = pluginClient + } + } + return pb.verifyPluginBinaryWithProfile() +} + +func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) 
(Notifier, error) { + + handshake, err := getHandshake() + if err != nil { + return nil, err + } + log.Debugf("Executing plugin %s", binaryPath) + cmd, err := pb.CreateCmd(binaryPath) + if err != nil { + return nil, err + } + pb.pluginMap[name] = &NotifierPlugin{} + l := log.New() + err = types.ConfigureLogger(l) + if err != nil { + return nil, err + } + // We set the highest level to permit plugins to set their own log level + // without that, crowdsec log level is controlling plugins level + l.SetLevel(log.TraceLevel) + logger := NewHCLogAdapter(l, "") + c := plugin.NewClient(&plugin.ClientConfig{ + HandshakeConfig: handshake, + Plugins: pb.pluginMap, + Cmd: cmd, + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + Logger: logger, + }) + client, err := c.Client() + if err != nil { + return nil, err + } + raw, err := client.Dispense(name) + if err != nil { + return nil, err + } + pb.pluginKillMethods = append(pb.pluginKillMethods, c.Kill) + return raw.(Notifier), nil +} + +func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*models.Alert) error { + log.WithField("plugin", pluginName).Debugf("pushing %d alerts to plugin", len(alerts)) + if len(alerts) == 0 { + return nil + } + + message, err := formatAlerts(pb.pluginConfigByName[pluginName].Format, alerts) + if err != nil { + return err + } + plugin := pb.notificationPluginByName[pluginName] + backoffDuration := time.Second + for i := 1; i <= pb.pluginConfigByName[pluginName].MaxRetry; i++ { + ctx, cancel := context.WithTimeout(context.Background(), pb.pluginConfigByName[pluginName].TimeOut) + defer cancel() + _, err = plugin.Notify( + ctx, + &protobufs.Notification{ + Text: message, + Name: pluginName, + }, + ) + if err == nil { + return nil + } + log.WithField("plugin", pluginName).Errorf("%s error, retry num %d", err, i) + time.Sleep(backoffDuration) + backoffDuration *= 2 + } + + return err +} + +func ParsePluginConfigFile(path string) ([]PluginConfig, error) { + parsedConfigs := make([]PluginConfig, 0) + yamlFile, err := os.Open(path) + if err != nil { + return parsedConfigs, errors.Wrapf(err, "while opening %s", path) + } + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + for { + pc := PluginConfig{} + err = dec.Decode(&pc) + if err != nil { + if err == io.EOF { + break + } + return []PluginConfig{}, fmt.Errorf("while decoding %s got error %s", path, err) + } + parsedConfigs = append(parsedConfigs, pc) + } + return parsedConfigs, nil +} + +func setRequiredFields(pluginCfg *PluginConfig) { + if pluginCfg.MaxRetry == 0 { + pluginCfg.MaxRetry++ + } + + if pluginCfg.TimeOut == time.Second*0 { + pluginCfg.TimeOut = time.Second * 5 + } + +} + +// helper which gives paths to all files in the given directory non-recursively +func listFilesAtPath(path string) ([]string, error) { + filePaths := make([]string, 0) + files, err := os.ReadDir(path) + if err != nil { + return nil, err + } + for _, file := range files { + if file.IsDir() { + continue + } + filePaths = append(filePaths, filepath.Join(path, file.Name())) + } + return filePaths, nil +} + +func getUUID() (string, error) { + uuidv4, err := uuid.NewRandom() + if err != nil { + return "", err + } + return uuidv4.String(), nil +} + +func getHandshake() (plugin.HandshakeConfig, error) { + uuid, err := getUUID() + if err != nil { + return plugin.HandshakeConfig{}, err + } + handshake := plugin.HandshakeConfig{ + ProtocolVersion: PluginProtocolVersion, + MagicCookieKey: CrowdsecPluginKey, + MagicCookieValue: uuid, + } + return handshake, nil +} + 
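The delivery path in `pushNotificationsToPlugin` above retries failed notifications with a doubling backoff and a per-attempt timeout taken from the plugin config. One detail worth noting: the patch calls `defer cancel()` inside the retry loop, so every attempt's context stays alive until the function returns. Below is a standalone sketch of the same retry pattern that cancels per attempt instead; the `notifyWithRetry` name and the `notify` callback are illustrative, not part of the patch:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// notifyWithRetry retries notify up to maxRetry times, doubling the wait
// after each failure and bounding each attempt with its own timeout.
func notifyWithRetry(ctx context.Context, maxRetry int, timeout time.Duration, notify func(context.Context) error) error {
	backoff := time.Second
	var err error
	for i := 1; i <= maxRetry; i++ {
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		err = notify(attemptCtx)
		cancel() // release this attempt's resources right away
		if err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v\n", i, err)
		time.Sleep(backoff)
		backoff *= 2
	}
	return err
}

func main() {
	alwaysFail := func(context.Context) error { return errors.New("plugin unreachable") }
	fmt.Println(notifyWithRetry(context.Background(), 3, 5*time.Second, alwaysFail))
}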
+func formatAlerts(format string, alerts []*models.Alert) (string, error) { + template, err := template.New("").Funcs(sprig.TxtFuncMap()).Parse(format) + if err != nil { + return "", err + } + b := new(strings.Builder) + err = template.Execute(b, alerts) + if err != nil { + return "", err + } + return b.String(), nil +} diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go new file mode 100644 index 0000000..b2e9a7d --- /dev/null +++ b/pkg/csplugin/broker_test.go @@ -0,0 +1,604 @@ +//go:build linux || freebsd || netbsd || openbsd || solaris || !windows + +package csplugin + +import ( + "encoding/json" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "runtime" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +var testPath string + +func setPluginPermTo744(t *testing.T) { + setPluginPermTo(t, "744") +} + +func setPluginPermTo722(t *testing.T) { + setPluginPermTo(t, "722") +} + +func setPluginPermTo724(t *testing.T) { + setPluginPermTo(t, "724") +} +func TestGetPluginNameAndTypeFromPath(t *testing.T) { + setUp(t) + defer tearDown(t) + type args struct { + path string + } + tests := []struct { + name string + args args + want string + want1 string + expectedErr string + }{ + { + name: "valid plugin name, single dash", + args: args{ + path: path.Join(testPath, "notification-gitter"), + }, + want: "notification", + want1: "gitter", + }, + { + name: "invalid plugin name", + args: args{ + path: "./tests/gitter", + }, + expectedErr: "plugin name ./tests/gitter is invalid. 
Name should be like {type-name}", + }, + { + name: "valid plugin name, multiple dash", + args: args{ + path: "./tests/notification-instant-slack", + }, + want: "notification-instant", + want1: "slack", + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + got, got1, err := getPluginTypeAndSubtypeFromPath(tc.args.path) + cstest.RequireErrorContains(t, err, tc.expectedErr) + + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.want1, got1) + }) + } +} + +func TestListFilesAtPath(t *testing.T) { + setUp(t) + defer tearDown(t) + type args struct { + path string + } + tests := []struct { + name string + args args + want []string + expectedErr string + }{ + { + name: "valid directory", + args: args{ + path: testPath, + }, + want: []string{ + filepath.Join(testPath, "notification-gitter"), + filepath.Join(testPath, "slack"), + }, + }, + { + name: "invalid directory", + args: args{ + path: "./foo/bar/", + }, + expectedErr: "open ./foo/bar/: " + cstest.FileNotFoundMessage, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + got, err := listFilesAtPath(tc.args.path) + cstest.RequireErrorContains(t, err, tc.expectedErr) + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("listFilesAtPath() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestBrokerInit(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + tests := []struct { + name string + action func(*testing.T) + procCfg csconfig.PluginCfg + expectedErr string + }{ + { + name: "valid config", + action: setPluginPermTo744, + }, + { + name: "group writable binary", + expectedErr: "notification-dummy is world writable", + action: setPluginPermTo722, + }, + { + name: "group writable binary", + expectedErr: "notification-dummy is group writable", + action: setPluginPermTo724, + }, + { + name: "no plugin dir", + expectedErr: cstest.FileNotFoundMessage, + action: tearDown, + }, + { + name: "no plugin binary", + expectedErr: "binary for plugin dummy_default not found", + action: func(t *testing.T) { + err := os.Remove(path.Join(testPath, "notification-dummy")) + require.NoError(t, err) + }, + }, + { + name: "only specify user", + expectedErr: "both plugin user and group must be set", + procCfg: csconfig.PluginCfg{ + User: "123445555551122toto", + }, + action: setPluginPermTo744, + }, + { + name: "only specify group", + expectedErr: "both plugin user and group must be set", + procCfg: csconfig.PluginCfg{ + Group: "123445555551122toto", + }, + action: setPluginPermTo744, + }, + { + name: "Fails to run as root", + expectedErr: "operation not permitted", + procCfg: csconfig.PluginCfg{ + User: "root", + Group: "root", + }, + action: setPluginPermTo744, + }, + { + name: "Invalid user and group", + expectedErr: "unknown user toto1234", + procCfg: csconfig.PluginCfg{ + User: "toto1234", + Group: "toto1234", + }, + action: setPluginPermTo744, + }, + { + name: "Valid user and invalid group", + expectedErr: "unknown group toto1234", + procCfg: csconfig.PluginCfg{ + User: "nobody", + Group: "toto1234", + }, + action: setPluginPermTo744, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + defer tearDown(t) + buildDummyPlugin(t) + if tc.action != nil { + tc.action(t) + } + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + err := pb.Init(&tc.procCfg, profiles, 
&csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + defer pb.Kill() + cstest.RequireErrorContains(t, err, tc.expectedErr) + }) + } +} + +func readconfig(t *testing.T, path string) ([]byte, PluginConfig) { + var config PluginConfig + orig, err := os.ReadFile("tests/notifications/dummy.yaml") + require.NoError(t, err,"unable to read config file %s", path) + + err = yaml.Unmarshal(orig, &config) + require.NoError(t, err,"unable to unmarshal config file") + + return orig, config +} + +func writeconfig(t *testing.T, config PluginConfig, path string) { + data, err := yaml.Marshal(&config) + require.NoError(t, err,"unable to marshal config file") + + err = os.WriteFile(path, data, 0644) + require.NoError(t, err,"unable to write config file %s", path) +} + +func TestBrokerNoThreshold(t *testing.T) { + var alerts []models.Alert + DefaultEmptyTicker = 50 * time.Millisecond + + buildDummyPlugin(t) + setPluginPermTo744(t) + defer tearDown(t) + + // init + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + + // default config + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + + // send one item, it should be processed right now + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(200 * time.Millisecond) + + // we expect one now + content, err := os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 1) + + // remove it + os.Remove("./out") + + // and another one + log.Printf("second send") + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(200 * time.Millisecond) + + // we expect one again, as we cleaned the file + content, err = os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + err = json.Unmarshal(content, &alerts) + log.Printf("content-> %s", content) + assert.NoError(t, err) + assert.Len(t, alerts, 1) +} + +func TestBrokerRunGroupAndTimeThreshold_TimeFirst(t *testing.T) { + // test grouping by "time" + DefaultEmptyTicker = 50 * time.Millisecond + buildDummyPlugin(t) + setPluginPermTo744(t) + defer tearDown(t) + + // init + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + // set groupwait and groupthreshold, should honor whichever comes first + raw, cfg := readconfig(t, "tests/notifications/dummy.yaml") + cfg.GroupThreshold = 4 + cfg.GroupWait = 1 * time.Second + writeconfig(t, cfg, "tests/notifications/dummy.yaml") + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + // send data + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(500 * time.Millisecond) + // 
because of group threshold, we shouldn't have data yet + assert.NoFileExists(t, "./out") + time.Sleep(1 * time.Second) + // after 1 seconds, we should have data + content, err := os.ReadFile("./out") + assert.NoError(t, err) + + var alerts []models.Alert + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 3) + + // restore config + err = os.WriteFile("tests/notifications/dummy.yaml", raw, 0644) + require.NoError(t, err,"unable to write config file") +} + +func TestBrokerRunGroupAndTimeThreshold_CountFirst(t *testing.T) { + DefaultEmptyTicker = 50 * time.Millisecond + buildDummyPlugin(t) + setPluginPermTo(t, "744") + defer tearDown(t) + + // init + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + + // set groupwait and groupthreshold, should honor whichever comes first + raw, cfg := readconfig(t, "tests/notifications/dummy.yaml") + cfg.GroupThreshold = 4 + cfg.GroupWait = 4 * time.Second + writeconfig(t, cfg, "tests/notifications/dummy.yaml") + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + + // send data + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(100 * time.Millisecond) + + // because of group threshold, we shouldn't have data yet + assert.NoFileExists(t, "./out") + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(100 * time.Millisecond) + + // and now we should + content, err := os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + var alerts []models.Alert + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 4) + + // restore config + err = os.WriteFile("tests/notifications/dummy.yaml", raw, 0644) + require.NoError(t, err,"unable to write config file") +} + +func TestBrokerRunGroupThreshold(t *testing.T) { + // test grouping by "size" + DefaultEmptyTicker = 50 * time.Millisecond + buildDummyPlugin(t) + setPluginPermTo(t, "744") + defer tearDown(t) + + // init + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + + // set groupwait + raw, cfg := readconfig(t, "tests/notifications/dummy.yaml") + cfg.GroupThreshold = 4 + writeconfig(t, cfg, "tests/notifications/dummy.yaml") + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + + // send data + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(100 * time.Millisecond) + + // because of group threshold, we shouldn't have data yet + assert.NoFileExists(t, "./out") + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: 
&models.Alert{}} + time.Sleep(100 * time.Millisecond) + + // and now we should + content, err := os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + var alerts []models.Alert + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 4) + + // restore config + err = os.WriteFile("tests/notifications/dummy.yaml", raw, 0644) + require.NoError(t, err, "unable to write config file") +} + +func TestBrokerRunTimeThreshold(t *testing.T) { + DefaultEmptyTicker = 50 * time.Millisecond + buildDummyPlugin(t) + setPluginPermTo(t, "744") + defer tearDown(t) + + // init + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + + // set groupwait + raw, cfg := readconfig(t, "tests/notifications/dummy.yaml") + cfg.GroupWait = 1 * time.Second + writeconfig(t, cfg, "tests/notifications/dummy.yaml") + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + + // send data + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(200 * time.Millisecond) + + // we shouldn't have data yet + assert.NoFileExists(t, "./out") + time.Sleep(1 * time.Second) + + // and now we should + content, err := os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + var alerts []models.Alert + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 1) + + // restore config + err = os.WriteFile("tests/notifications/dummy.yaml", raw, 0644) + require.NoError(t, err, "unable to write config file %s", err) +} + +func TestBrokerRunSimple(t *testing.T) { + DefaultEmptyTicker = 50 * time.Millisecond + buildDummyPlugin(t) + setPluginPermTo(t, "744") + defer tearDown(t) + pluginCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + err := pb.Init(&pluginCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + assert.NoError(t, err) + tomb := tomb.Tomb{} + + go pb.Run(&tomb) + defer pb.Kill() + + assert.NoFileExists(t, "./out") + + defer os.Remove("./out") + + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(time.Millisecond * 200) + + content, err := os.ReadFile("./out") + require.NoError(t, err, "Error reading file") + + var alerts []models.Alert + err = json.Unmarshal(content, &alerts) + assert.NoError(t, err) + assert.Len(t, alerts, 2) +} + +func buildDummyPlugin(t *testing.T) { + dir, err := os.MkdirTemp("./tests", "cs_plugin_test") + require.NoError(t, err) + + cmd := exec.Command("go", "build", "-o", path.Join(dir, "notification-dummy"), "../../plugins/notifications/dummy/") + err = cmd.Run() + require.NoError(t, err, "while building dummy plugin") + + testPath = dir + os.Remove("./out") +} + +func setPluginPermTo(t *testing.T, perm string) { + if runtime.GOOS != "windows" { + err := exec.Command("chmod", perm, path.Join(testPath, "notification-dummy")).Run() + require.NoError(t, err, "chmod 744 %s", path.Join(testPath, 
"notification-dummy")) + } +} + +func setUp(t *testing.T) { + dir, err := os.MkdirTemp("./", "cs_plugin_test") + require.NoError(t, err) + + f, err := os.Create(path.Join(dir, "slack")) + require.NoError(t, err) + + f.Close() + f, err = os.Create(path.Join(dir, "notification-gitter")) + require.NoError(t, err) + + f.Close() + err = os.Mkdir(path.Join(dir, "dummy_dir"), 0666) + require.NoError(t, err) + + testPath = dir +} + +func tearDown(t *testing.T) { + err := os.RemoveAll(testPath) + require.NoError(t, err) + + os.Remove("./out") +} diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go new file mode 100644 index 0000000..2595db3 --- /dev/null +++ b/pkg/csplugin/broker_win_test.go @@ -0,0 +1,262 @@ +//go:build windows + +package csplugin + +import ( + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cstest" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +/* +Due to the complexity of file permission modification with go on windows, we only test the basic behavior the broker, +not if it will actually reject plugins with invalid permissions +*/ + +var testPath string + +func TestGetPluginNameAndTypeFromPath(t *testing.T) { + setUp() + defer tearDown() + type args struct { + path string + } + tests := []struct { + name string + args args + want string + want1 string + wantErr bool + }{ + { + name: "valid plugin name, single dash", + args: args{ + path: path.Join(testPath, "notification-gitter"), + }, + want: "notification", + want1: "gitter", + wantErr: false, + }, + { + name: "invalid plugin name", + args: args{ + path: ".\\tests\\gitter.exe", + }, + want: "", + want1: "", + wantErr: true, + }, + { + name: "valid plugin name, multiple dash", + args: args{ + path: ".\\tests\\notification-instant-slack.exe", + }, + want: "notification-instant", + want1: "slack", + wantErr: false, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got, got1, err := getPluginTypeAndSubtypeFromPath(tt.args.path) + if (err != nil) != tt.wantErr { + t.Errorf("getPluginNameAndTypeFromPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("getPluginNameAndTypeFromPath() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("getPluginNameAndTypeFromPath() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func TestListFilesAtPath(t *testing.T) { + setUp() + defer tearDown() + type args struct { + path string + } + tests := []struct { + name string + args args + want []string + wantErr bool + }{ + { + name: "valid directory", + args: args{ + path: testPath, + }, + want: []string{ + filepath.Join(testPath, "notification-gitter"), + filepath.Join(testPath, "slack"), + }, + }, + { + name: "invalid directory", + args: args{ + path: "./foo/bar/", + }, + wantErr: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + got, err := listFilesAtPath(tt.args.path) + if (err != nil) != tt.wantErr { + t.Errorf("listFilesAtPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("listFilesAtPath() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBrokerInit(t *testing.T) { + tests := []struct { + name string + action func() + errContains string + wantErr bool + 
procCfg csconfig.PluginCfg + }{ + { + name: "valid config", + wantErr: false, + }, + { + name: "no plugin dir", + wantErr: true, + errContains: cstest.FileNotFoundMessage, + action: tearDown, + }, + { + name: "no plugin binary", + wantErr: true, + errContains: "binary for plugin dummy_default not found", + action: func() { + err := os.Remove(path.Join(testPath, "notification-dummy.exe")) + if err != nil { + t.Fatal(err) + } + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + defer tearDown() + buildDummyPlugin() + if test.action != nil { + test.action() + } + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + err := pb.Init(&test.procCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + defer pb.Kill() + if test.wantErr { + assert.ErrorContains(t, err, test.errContains) + } else { + assert.NoError(t, err) + } + + }) + } +} + +func TestBrokerRun(t *testing.T) { + buildDummyPlugin() + defer tearDown() + procCfg := csconfig.PluginCfg{} + pb := PluginBroker{} + profiles := csconfig.NewDefaultConfig().API.Server.Profiles + profiles = append(profiles, &csconfig.ProfileCfg{ + Notifications: []string{"dummy_default"}, + }) + err := pb.Init(&procCfg, profiles, &csconfig.ConfigurationPaths{ + PluginDir: testPath, + NotificationDir: "./tests/notifications", + }) + assert.NoError(t, err) + tomb := tomb.Tomb{} + go pb.Run(&tomb) + defer pb.Kill() + + assert.NoFileExists(t, "./out") + defer os.Remove("./out") + + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} + time.Sleep(time.Second * 4) + + assert.FileExists(t, ".\\out") + assert.Equal(t, types.GetLineCountForFile(".\\out"), 2) +} + +func buildDummyPlugin() { + dir, err := os.MkdirTemp(".\\tests", "cs_plugin_test") + if err != nil { + log.Fatal(err) + } + cmd := exec.Command("go", "build", "-o", path.Join(dir, "notification-dummy.exe"), "../../plugins/notifications/dummy/") + if err := cmd.Run(); err != nil { + log.Fatal(err) + } + testPath = dir +} + +func setUp() { + dir, err := os.MkdirTemp("./", "cs_plugin_test") + if err != nil { + log.Fatal(err) + } + f, err := os.Create(path.Join(dir, "slack")) + if err != nil { + log.Fatal(err) + } + f.Close() + f, err = os.Create(path.Join(dir, "notification-gitter")) + if err != nil { + log.Fatal(err) + } + f.Close() + err = os.Mkdir(path.Join(dir, "dummy_dir"), 0666) + if err != nil { + log.Fatal(err) + } + testPath = dir +} + +func tearDown() { + err := os.RemoveAll(testPath) + if err != nil { + log.Fatal(err) + } +} diff --git a/pkg/csplugin/hclog_adapter.go b/pkg/csplugin/hclog_adapter.go new file mode 100644 index 0000000..253623c --- /dev/null +++ b/pkg/csplugin/hclog_adapter.go @@ -0,0 +1,213 @@ +// Copyright 2021 Workrise Technologies Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package csplugin + +import ( + "fmt" + "io" + "log" + "os" + "reflect" + + "github.com/hashicorp/go-hclog" + "github.com/sirupsen/logrus" +) + +// NewHCLogAdapter takes an instance of a Logrus logger and returns an hclog +// logger in the form of an HCLogAdapter. +func NewHCLogAdapter(l *logrus.Logger, name string) hclog.Logger { + return &HCLogAdapter{l, name, nil} +} + +// HCLogAdapter implements the hclog interface. Plugins use hclog to send +// log entries back to ephemeral-iam and this adapter allows for those logs +// to be handled by ephemeral-iam's Logrus logger. +type HCLogAdapter struct { + log *logrus.Logger + name string + + impliedArgs []interface{} +} + +func (h HCLogAdapter) Log(level hclog.Level, msg string, args ...interface{}) { + switch level { + case hclog.NoLevel: + return + case hclog.Trace: + h.Trace(msg, args...) + case hclog.Debug: + h.Debug(msg, args...) + case hclog.Info: + h.Info(msg, args...) + case hclog.Warn: + h.Warn(msg, args...) + case hclog.Error: + h.Error(msg, args...) + } +} + +func (h HCLogAdapter) Trace(msg string, args ...interface{}) { + h.log.WithFields(toLogrusFields(args)).Trace(msg) +} + +func (h HCLogAdapter) Debug(msg string, args ...interface{}) { + h.log.WithFields(toLogrusFields(args)).Debug(msg) +} + +func (h HCLogAdapter) Info(msg string, args ...interface{}) { + h.log.WithFields(toLogrusFields(args)).Info(msg) +} + +func (h HCLogAdapter) Warn(msg string, args ...interface{}) { + h.log.WithFields(toLogrusFields(args)).Warn(msg) +} + +func (h HCLogAdapter) Error(msg string, args ...interface{}) { + h.log.WithFields(toLogrusFields(args)).Error(msg) +} + +func (h HCLogAdapter) IsTrace() bool { + return h.log.GetLevel() >= logrus.TraceLevel +} + +func (h HCLogAdapter) IsDebug() bool { + return h.log.GetLevel() >= logrus.DebugLevel +} + +func (h HCLogAdapter) IsInfo() bool { + return h.log.GetLevel() >= logrus.InfoLevel +} + +func (h HCLogAdapter) IsWarn() bool { + return h.log.GetLevel() >= logrus.WarnLevel +} + +func (h HCLogAdapter) IsError() bool { + return h.log.GetLevel() >= logrus.ErrorLevel +} + +func (h HCLogAdapter) ImpliedArgs() []interface{} { + // Not supported. + return nil +} + +func (h HCLogAdapter) With(args ...interface{}) hclog.Logger { + return &h +} + +func (h HCLogAdapter) Name() string { + return h.name +} + +func (h HCLogAdapter) Named(name string) hclog.Logger { + return NewHCLogAdapter(h.log, name) +} + +func (h HCLogAdapter) ResetNamed(name string) hclog.Logger { + return &h +} + +func (h *HCLogAdapter) SetLevel(level hclog.Level) { + h.log.SetLevel(convertLevel(level)) +} + +func (h HCLogAdapter) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &hclog.StandardLoggerOptions{} + } + return log.New(h.StandardWriter(opts), "", 0) +} + +func (h HCLogAdapter) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer { + return os.Stderr +} + +// convertLevel maps hclog levels to Logrus levels. +func convertLevel(level hclog.Level) logrus.Level { + switch level { + case hclog.NoLevel: + // Logrus does not have NoLevel, so use Info instead. 
+ return logrus.InfoLevel + case hclog.Trace: + return logrus.TraceLevel + case hclog.Debug: + return logrus.DebugLevel + case hclog.Info: + return logrus.InfoLevel + case hclog.Warn: + return logrus.WarnLevel + case hclog.Error: + return logrus.ErrorLevel + default: + return logrus.InfoLevel + } +} + +// toLogrusFields takes a list of key/value pairs passed to the hclog logger +// and converts them to a map to be used as Logrus fields. +func toLogrusFields(kvPairs []interface{}) map[string]interface{} { + m := map[string]interface{}{} + if len(kvPairs) == 0 { + return m + } + + if len(kvPairs)%2 == 1 { + // There are an odd number of key/value pairs so append nil as the final value. + kvPairs = append(kvPairs, nil) + } + + for i := 0; i < len(kvPairs); i += 2 { + // hclog automatically adds the timestamp field, ignore it. + if kvPairs[i] != "timestamp" { + merge(m, kvPairs[i], kvPairs[i+1]) + } + } + return m +} + +// merge takes a key/value pair and converts them to strings then adds them to +// the dst map. +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + dst[key] = v +} + +// safeString takes an interface that implements the String() function and calls it +// to attempt to convert it to a string. If a panic occurs, and it's caused by a +// nil pointer, the value will be set to "NULL". +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + + s = str.String() + return +} diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go new file mode 100644 index 0000000..64a1e6e --- /dev/null +++ b/pkg/csplugin/notifier.go @@ -0,0 +1,59 @@ +package csplugin + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + plugin "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" +) + +type Notifier interface { + Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) + Configure(ctx context.Context, cfg *protobufs.Config) (*protobufs.Empty, error) +} + +type NotifierPlugin struct { + plugin.Plugin + Impl Notifier +} + +type GRPCClient struct{ client protobufs.NotifierClient } + +func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + done := make(chan error) + go func() { + _, err := m.client.Notify( + context.Background(), &protobufs.Notification{Text: notification.Text, Name: notification.Name}, + ) + done <- err + }() + select { + case err := <-done: + return &protobufs.Empty{}, err + + case <-ctx.Done(): + return &protobufs.Empty{}, fmt.Errorf("timeout exceeded") + } +} + +func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + _, err := m.client.Configure( + context.Background(), config, + ) + return &protobufs.Empty{}, err +} + +type GRPCServer struct { + Impl Notifier +} + +func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + protobufs.RegisterNotifierServer(s, p.Impl) + return nil +} + +func (p *NotifierPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCClient{client: protobufs.NewNotifierClient(c)}, nil +} diff --git 
a/pkg/csplugin/tests/notifications/dummy.yaml b/pkg/csplugin/tests/notifications/dummy.yaml new file mode 100644 index 0000000..1b883af --- /dev/null +++ b/pkg/csplugin/tests/notifications/dummy.yaml @@ -0,0 +1,22 @@ +type: dummy # Don't change +name: dummy_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Amount of alerts that triggers a message before has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the logs and to a text file, if defined +format: | + {{.|toJson}} + +# +output_file: ./out # notifications will be appended here. optional + diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go new file mode 100644 index 0000000..cb8104a --- /dev/null +++ b/pkg/csplugin/utils.go @@ -0,0 +1,137 @@ +//go:build linux || freebsd || netbsd || openbsd || solaris || !windows + +package csplugin + +import ( + "fmt" + "io/fs" + "math" + "os" + "os/exec" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +func CheckCredential(uid int, gid int) *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + }, + } +} + +func (pb *PluginBroker) CreateCmd(binaryPath string) (*exec.Cmd, error) { + var err error + cmd := exec.Command(binaryPath) + if pb.pluginProcConfig.User != "" || pb.pluginProcConfig.Group != "" { + if !(pb.pluginProcConfig.User != "" && pb.pluginProcConfig.Group != "") { + return nil, errors.New("while getting process attributes: both plugin user and group must be set") + } + cmd.SysProcAttr, err = getProcessAttr(pb.pluginProcConfig.User, pb.pluginProcConfig.Group) + if err != nil { + return nil, errors.Wrap(err, "while getting process attributes") + } + cmd.SysProcAttr.Credential.NoSetGroups = true + } + return cmd, err +} + +func getUID(username string) (uint32, error) { + u, err := user.Lookup(username) + if err != nil { + return 0, err + } + uid, err := strconv.ParseInt(u.Uid, 10, 32) + if err != nil { + return 0, err + } + if uid < 0 || uid > math.MaxInt32 { + return 0, fmt.Errorf("out of bound uid") + } + return uint32(uid), nil +} + +func getGID(groupname string) (uint32, error) { + g, err := user.LookupGroup(groupname) + if err != nil { + return 0, err + } + gid, err := strconv.ParseInt(g.Gid, 10, 32) + if err != nil { + return 0, err + } + if gid < 0 || gid > math.MaxInt32 { + return 0, fmt.Errorf("out of bound gid") + } + return uint32(gid), nil +} + +func getPluginTypeAndSubtypeFromPath(path string) (string, string, error) { + pluginFileName := filepath.Base(path) + parts := strings.Split(pluginFileName, "-") + if len(parts) < 2 { + return "", "", fmt.Errorf("plugin name %s is invalid. 
Name should be like {type-name}", path) + } + return strings.Join(parts[:len(parts)-1], "-"), parts[len(parts)-1], nil +} + +func getProcessAttr(username string, groupname string) (*syscall.SysProcAttr, error) { + uid, err := getUID(username) + if err != nil { + return nil, err + } + gid, err := getGID(groupname) + if err != nil { + return nil, err + } + + return &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uid, + Gid: gid, + }, + }, nil +} + +func pluginIsValid(path string) error { + var details fs.FileInfo + var err error + + // check if it exists + if details, err = os.Stat(path); err != nil { + return errors.Wrap(err, fmt.Sprintf("plugin at %s does not exist", path)) + } + + // check if it is owned by current user + currentUser, err := user.Current() + if err != nil { + return errors.Wrap(err, "while getting current user") + } + currentUID, err := getUID(currentUser.Username) + if err != nil { + return errors.Wrap(err, "while looking up the current uid") + } + stat := details.Sys().(*syscall.Stat_t) + if stat.Uid != currentUID { + return fmt.Errorf("plugin at %s is not owned by user '%s'", path, currentUser.Username) + } + + mode := details.Mode() + perm := uint32(mode) + if (perm & 00002) != 0 { + return fmt.Errorf("plugin at %s is world writable, world writable plugins are invalid", path) + } + if (perm & 00020) != 0 { + return fmt.Errorf("plugin at %s is group writable, group writable plugins are invalid", path) + } + if (mode & os.ModeSetgid) != 0 { + return fmt.Errorf("plugin at %s has setgid permission, which is not allowed", path) + } + return nil +} diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go new file mode 100644 index 0000000..874e300 --- /dev/null +++ b/pkg/csplugin/utils_windows.go @@ -0,0 +1,242 @@ +//go:build windows + +package csplugin + +import ( + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "reflect" + "strings" + "syscall" + "unsafe" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + advapi32 = syscall.NewLazyDLL("advapi32.dll") + + procGetAce = advapi32.NewProc("GetAce") +) + +type AclSizeInformation struct { + AceCount uint32 + AclBytesInUse uint32 + AclBytesFree uint32 +} + +type Acl struct { + AclRevision uint8 + Sbz1 uint8 + AclSize uint16 + AceCount uint16 + Sbz2 uint16 +} + +type AccessAllowedAce struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 + AccessMask uint32 + SidStart uint32 +} + +const ACCESS_ALLOWED_ACE_TYPE = 0 +const ACCESS_DENIED_ACE_TYPE = 1 + +func CheckPerms(path string) error { + log.Debugf("checking permissions of %s\n", path) + + systemSid, err := windows.CreateWellKnownSid(windows.WELL_KNOWN_SID_TYPE(windows.WinLocalSystemSid)) + if err != nil { + return errors.Wrap(err, "while creating SYSTEM well known sid") + } + + adminSid, err := windows.CreateWellKnownSid(windows.WELL_KNOWN_SID_TYPE(windows.WinBuiltinAdministratorsSid)) + if err != nil { + return errors.Wrap(err, "while creating built-in Administrators well known sid") + } + + currentUser, err := user.Current() + if err != nil { + return errors.Wrap(err, "while getting current user") + } + + currentUserSid, _, _, err := windows.LookupSID("", currentUser.Username) + + if err != nil { + return errors.Wrap(err, "while looking up current user sid") + } + + sd, err := windows.GetNamedSecurityInfo(path, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) + if err != nil { + return errors.Wrap(err, "while getting owner security 
info") + } + if !sd.IsValid() { + return errors.New("security descriptor is invalid") + } + owner, _, err := sd.Owner() + if err != nil { + return errors.Wrap(err, "while getting owner") + } + if !owner.IsValid() { + return errors.New("owner is invalid") + } + + if !owner.Equals(systemSid) && !owner.Equals(currentUserSid) && !owner.Equals(adminSid) { + return fmt.Errorf("plugin at %s is not owned by SYSTEM, Administrators or by current user, but by %s", path, owner.String()) + } + + dacl, _, err := sd.DACL() + if err != nil { + return errors.Wrap(err, "while getting DACL") + } + + if dacl == nil { + return fmt.Errorf("no DACL found on plugin, meaning fully permissive access on plugin %s", path) + } + + if err != nil { + return errors.Wrap(err, "while looking up current user sid") + } + + rs := reflect.ValueOf(dacl).Elem() + + /* + For reference, the structure of the ACL type is: + type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 + } + As the field are not exported, we have to use reflection to access them, this should not be an issue as the structure won't (probably) change any time soon. + */ + aceCount := rs.Field(3).Uint() + + for i := uint64(0); i < aceCount; i++ { + ace := &AccessAllowedAce{} + ret, _, _ := procGetAce.Call(uintptr(unsafe.Pointer(dacl)), uintptr(i), uintptr(unsafe.Pointer(&ace))) + if ret == 0 { + return errors.Wrap(windows.GetLastError(), "while getting ACE") + } + log.Debugf("ACE %d: %+v\n", i, ace) + + if ace.AceType == ACCESS_DENIED_ACE_TYPE { + continue + } + aceSid := (*windows.SID)(unsafe.Pointer(&ace.SidStart)) + + if aceSid.Equals(systemSid) || aceSid.Equals(adminSid) { + log.Debugf("Not checking permission for well-known SID %s", aceSid.String()) + continue + } + + if aceSid.Equals(currentUserSid) { + log.Debugf("Not checking permission for current user %s", currentUser.Username) + continue + } + + log.Debugf("Checking permission for SID %s", aceSid.String()) + denyMask := ^(windows.FILE_GENERIC_READ | windows.FILE_GENERIC_EXECUTE) + if ace.AccessMask&uint32(denyMask) != 0 { + return fmt.Errorf("only SYSTEM, Administrators or the user currently running crowdsec can have more than read/execute on plugin %s", path) + } + } + + return nil +} + +func getProcessAtr() (*syscall.SysProcAttr, error) { + var procToken, token windows.Token + + proc := windows.CurrentProcess() + defer windows.CloseHandle(proc) + + err := windows.OpenProcessToken(proc, windows.TOKEN_DUPLICATE|windows.TOKEN_ADJUST_DEFAULT| + windows.TOKEN_QUERY|windows.TOKEN_ASSIGN_PRIMARY|windows.TOKEN_ADJUST_GROUPS|windows.TOKEN_ADJUST_PRIVILEGES, &procToken) + if err != nil { + return nil, errors.Wrapf(err, "while opening process token") + } + defer procToken.Close() + + err = windows.DuplicateTokenEx(procToken, 0, nil, windows.SecurityImpersonation, + windows.TokenPrimary, &token) + if err != nil { + return nil, errors.Wrapf(err, "while duplicating token") + } + + //Remove all privileges from the token + + err = windows.AdjustTokenPrivileges(token, true, nil, 0, nil, nil) + + if err != nil { + return nil, errors.Wrapf(err, "while adjusting token privileges") + } + + //Run the plugin as a medium integrity level process + //For some reasons, low level integrity don't work, the plugin and crowdsec cannot communicate over the TCP socket + sid, err := windows.CreateWellKnownSid(windows.WELL_KNOWN_SID_TYPE(windows.WinMediumLabelSid)) + if err != nil { + return nil, err + } + + tml := &windows.Tokenmandatorylabel{} + tml.Label.Attributes = 
windows.SE_GROUP_INTEGRITY + tml.Label.Sid = sid + + err = windows.SetTokenInformation(token, windows.TokenIntegrityLevel, + (*byte)(unsafe.Pointer(tml)), tml.Size()) + if err != nil { + token.Close() + return nil, errors.Wrapf(err, "while setting token information") + } + + return &windows.SysProcAttr{ + CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, + Token: syscall.Token(token), + }, nil +} + +func (pb *PluginBroker) CreateCmd(binaryPath string) (*exec.Cmd, error) { + var err error + cmd := exec.Command(binaryPath) + cmd.SysProcAttr, err = getProcessAtr() + if err != nil { + return nil, errors.Wrap(err, "while getting process attributes") + } + return cmd, err +} + +func getPluginTypeAndSubtypeFromPath(path string) (string, string, error) { + pluginFileName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + + parts := strings.Split(pluginFileName, "-") + if len(parts) < 2 { + return "", "", fmt.Errorf("plugin name %s is invalid. Name should be like {type-name}", path) + } + return strings.Join(parts[:len(parts)-1], "-"), parts[len(parts)-1], nil +} + +func pluginIsValid(path string) error { + var err error + + // check if it exists + if _, err = os.Stat(path); err != nil { + return errors.Wrap(err, fmt.Sprintf("plugin at %s does not exist", path)) + } + + // check if it is owned by root + err = CheckPerms(path) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/csplugin/watcher.go b/pkg/csplugin/watcher.go new file mode 100644 index 0000000..a95ade5 --- /dev/null +++ b/pkg/csplugin/watcher.go @@ -0,0 +1,163 @@ +package csplugin + +import ( + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/models" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" +) + +/* + PluginWatcher is here to allow grouping and threshold features for notification plugins : + by frequency : it will signal the plugin to deliver notifications at this frequency (watchPluginTicker) + by threshold : it will signal the plugin to deliver notifications when the number of alerts for this plugin reaches this threshold (watchPluginAlertCounts) +*/ + +// TODO: When we start using go 1.18, consider moving this struct in some utils pkg. 
Make the implementation more generic using generics :)
+type alertCounterByPluginName struct {
+	sync.Mutex
+	data map[string]int
+}
+
+func newAlertCounterByPluginName() alertCounterByPluginName {
+	return alertCounterByPluginName{
+		data: make(map[string]int),
+	}
+}
+
+func (acp *alertCounterByPluginName) Init() {
+	acp.data = make(map[string]int)
+}
+
+func (acp *alertCounterByPluginName) Get(key string) (int, bool) {
+	acp.Lock()
+	val, ok := acp.data[key]
+	acp.Unlock()
+	return val, ok
+}
+
+func (acp *alertCounterByPluginName) Set(key string, val int) {
+	acp.Lock()
+	acp.data[key] = val
+	acp.Unlock()
+}
+
+type PluginWatcher struct {
+	PluginConfigByName     map[string]PluginConfig
+	AlertCountByPluginName alertCounterByPluginName
+	PluginEvents           chan string
+	Inserts                chan string
+	tomb                   *tomb.Tomb
+}
+
+var DefaultEmptyTicker = time.Second * 1
+
+func (pw *PluginWatcher) Init(configs map[string]PluginConfig, alertsByPluginName map[string][]*models.Alert) {
+	pw.PluginConfigByName = configs
+	pw.PluginEvents = make(chan string)
+	pw.AlertCountByPluginName = newAlertCounterByPluginName()
+	pw.Inserts = make(chan string)
+	for name := range alertsByPluginName {
+		pw.AlertCountByPluginName.Set(name, 0)
+	}
+}
+
+func (pw *PluginWatcher) Start(tomb *tomb.Tomb) {
+	pw.tomb = tomb
+	for name := range pw.PluginConfigByName {
+		pname := name
+		pw.tomb.Go(func() error {
+			pw.watchPluginTicker(pname)
+			return nil
+		})
+	}
+
+	pw.tomb.Go(func() error {
+		pw.watchPluginAlertCounts()
+		return nil
+	})
+}
+
+func (pw *PluginWatcher) watchPluginTicker(pluginName string) {
+	var watchTime time.Duration
+	var watchCount int = -1
+	// Threshold can be set : by time, by count, or both
+	// if only time is set, honor it
+	// if only count is set, put timer to 1 second and just check size
+	// if both are set, set timer to 1 second, but check size && time
+	interval := pw.PluginConfigByName[pluginName].GroupWait
+	threshold := pw.PluginConfigByName[pluginName].GroupThreshold
+
+	//only size is set
+	if threshold > 0 && interval == 0 {
+		watchCount = threshold
+		watchTime = DefaultEmptyTicker
+	} else if interval != 0 && threshold == 0 {
+		//only time is set
+		watchTime = interval
+	} else if interval != 0 && threshold != 0 {
+		//both are set
+		watchTime = DefaultEmptyTicker
+		watchCount = threshold
+	} else {
+		//none are set, we send every event we receive
+		watchTime = DefaultEmptyTicker
+		watchCount = 1
+	}
+
+	ticker := time.NewTicker(watchTime)
+	var lastSend time.Time = time.Now()
+	for {
+		select {
+		case <-ticker.C:
+			send := false
+			//if count threshold was set, honor it no matter what
+			if pc, _ := pw.AlertCountByPluginName.Get(pluginName); watchCount > 0 && pc >= watchCount {
+				log.Tracef("[%s] %d alerts received, sending\n", pluginName, pc)
+				send = true
+				pw.AlertCountByPluginName.Set(pluginName, 0)
+			}
+			//if time threshold only was set
+			if watchTime > 0 && watchTime == interval {
+				log.Tracef("sending alerts to %s, duration %s elapsed", pluginName, interval)
+				send = true
+			}
+
+			//if we hit timer because it was set low to honor count, check if we should trigger
+			if watchTime == DefaultEmptyTicker && watchTime != interval && interval != 0 {
+				if lastSend.Add(interval).Before(time.Now()) {
+					log.Tracef("sending alerts to %s, duration %s elapsed", pluginName, interval)
+					send = true
+					lastSend = time.Now()
+				}
+			}
+			if send {
+				log.Tracef("sending alerts to %s", pluginName)
+				pw.PluginEvents <- pluginName
+			}
+		case <-pw.tomb.Dying():
+			ticker.Stop()
+			// one last flush: no lock is needed here because the broker keeps
+			// listening, even in dying state, until we are killed
+			pw.PluginEvents <- pluginName
+			return
+		}
+	}
+}
+
+func (pw *PluginWatcher) watchPluginAlertCounts() {
+	for {
+		select {
+		case pluginName := <-pw.Inserts:
+			//we only "count" pending alerts here, watchPluginTicker is what actually sends them
+			if _, ok := pw.PluginConfigByName[pluginName]; ok {
+				curr, _ := pw.AlertCountByPluginName.Get(pluginName)
+				pw.AlertCountByPluginName.Set(pluginName, curr+1)
+			}
+		case <-pw.tomb.Dying():
+			return
+		}
+	}
+}
diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go
new file mode 100644
index 0000000..94d8d06
--- /dev/null
+++ b/pkg/csplugin/watcher_test.go
@@ -0,0 +1,117 @@
+package csplugin
+
+import (
+	"context"
+	"log"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"gopkg.in/tomb.v2"
+	"gotest.tools/v3/assert"
+)
+
+var ctx = context.Background()
+
+func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) {
+	testTomb.Kill(nil)
+	<-pw.PluginEvents
+	if err := testTomb.Wait(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func resetWatcherAlertCounter(pw *PluginWatcher) {
+	pw.AlertCountByPluginName.Lock()
+	for k := range pw.AlertCountByPluginName.data {
+		pw.AlertCountByPluginName.data[k] = 0
+	}
+	pw.AlertCountByPluginName.Unlock()
+}
+
+func insertNAlertsToPlugin(pw *PluginWatcher, n int, pluginName string) {
+	for i := 0; i < n; i++ {
+		pw.Inserts <- pluginName
+	}
+}
+
+func listenChannelWithTimeout(ctx context.Context, channel chan string) error {
+	select {
+	case x := <-channel:
+		log.Printf("received -> %v", x)
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+	return nil
+}
+
+func TestPluginWatcherInterval(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Skipping test on windows because timing is not reliable")
+	}
+	pw := PluginWatcher{}
+	alertsByPluginName := make(map[string][]*models.Alert)
+	testTomb := tomb.Tomb{}
+	configs := map[string]PluginConfig{
+		"testPlugin": {
+			GroupWait: time.Millisecond,
+		},
+	}
+	pw.Init(configs, alertsByPluginName)
+	pw.Start(&testTomb)
+
+	ct, cancel := context.WithTimeout(ctx, time.Microsecond)
+	defer cancel()
+	err := listenChannelWithTimeout(ct, pw.PluginEvents)
+	assert.ErrorContains(t, err, "context deadline exceeded")
+	resetTestTomb(&testTomb, &pw)
+	testTomb = tomb.Tomb{}
+	pw.Start(&testTomb)
+
+	ct, cancel = context.WithTimeout(ctx, time.Millisecond*5)
+	defer cancel()
+	err = listenChannelWithTimeout(ct, pw.PluginEvents)
+	assert.NilError(t, err)
+	resetTestTomb(&testTomb, &pw)
+}
+
+func TestPluginAlertCountWatcher(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Skipping test on windows because timing is not reliable")
+	}
+	pw := PluginWatcher{}
+	alertsByPluginName := make(map[string][]*models.Alert)
+	configs := map[string]PluginConfig{
+		"testPlugin": {
+			GroupThreshold: 5,
+		},
+	}
+	testTomb := tomb.Tomb{}
+	pw.Init(configs, alertsByPluginName)
+	pw.Start(&testTomb)
+
+	// Channel won't contain any events since threshold is not crossed.
+	ct, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+	err := listenChannelWithTimeout(ct, pw.PluginEvents)
+	assert.ErrorContains(t, err, "context deadline exceeded")
+
+	// Channel still won't contain any events: 4 alerts stay below the threshold of 5.
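+	// Alerts pushed into pw.Inserts are tallied by watchPluginAlertCounts;
+	// watchPluginTicker only emits on PluginEvents once the per-plugin tally
+	// reaches GroupThreshold.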
+ resetWatcherAlertCounter(&pw) + insertNAlertsToPlugin(&pw, 4, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) + defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) + assert.ErrorContains(t, err, "context deadline exceeded") + + // Channel will contain an event since threshold is crossed. + resetWatcherAlertCounter(&pw) + insertNAlertsToPlugin(&pw, 5, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) + defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) + assert.NilError(t, err) + resetTestTomb(&testTomb, &pw) +} diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go new file mode 100644 index 0000000..b2ca602 --- /dev/null +++ b/pkg/csprofiles/csprofiles.go @@ -0,0 +1,196 @@ +package csprofiles + +import ( + "fmt" + "time" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +type Runtime struct { + RuntimeFilters []*vm.Program `json:"-" yaml:"-"` + DebugFilters []*exprhelpers.ExprDebugger `json:"-" yaml:"-"` + RuntimeDurationExpr *vm.Program `json:"-" yaml:"-"` + DebugDurationExpr *exprhelpers.ExprDebugger `json:"-" yaml:"-"` + Cfg *csconfig.ProfileCfg `json:"-" yaml:"-"` + Logger *log.Entry `json:"-" yaml:"-"` +} + +var defaultDuration = "4h" + +func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { + var err error + profilesRuntime := make([]*Runtime, 0) + + for _, profile := range profilesCfg { + var runtimeFilter, runtimeDurationExpr *vm.Program + var debugFilter, debugDurationExpr *exprhelpers.ExprDebugger + runtime := &Runtime{} + xlog := log.New() + if err := types.ConfigureLogger(xlog); err != nil { + log.Fatalf("While creating profiles-specific logger : %s", err) + } + xlog.SetLevel(log.InfoLevel) + runtime.Logger = xlog.WithFields(log.Fields{ + "type": "profile", + "name": profile.Name, + }) + + runtime.RuntimeFilters = make([]*vm.Program, len(profile.Filters)) + runtime.DebugFilters = make([]*exprhelpers.ExprDebugger, len(profile.Filters)) + runtime.Cfg = profile + + for fIdx, filter := range profile.Filters { + if runtimeFilter, err = expr.Compile(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil { + return []*Runtime{}, errors.Wrapf(err, "error compiling filter of '%s'", profile.Name) + } + runtime.RuntimeFilters[fIdx] = runtimeFilter + if profile.Debug != nil && *profile.Debug { + if debugFilter, err = exprhelpers.NewDebugger(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil { + log.Debugf("Error compiling debug filter of %s : %s", profile.Name, err) + // Don't fail if we can't compile the filter - for now + // return errors.Wrapf(err, "Error compiling debug filter of %s", profile.Name) + } + runtime.DebugFilters[fIdx] = debugFilter + runtime.Logger.Logger.SetLevel(log.DebugLevel) + } + } + + if profile.DurationExpr != "" { + if runtimeDurationExpr, err = expr.Compile(profile.DurationExpr, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil { + return []*Runtime{}, errors.Wrapf(err, "error compiling duration_expr of %s", profile.Name) + } + + runtime.RuntimeDurationExpr = runtimeDurationExpr + if profile.Debug != nil && *profile.Debug { + if 
debugDurationExpr, err = exprhelpers.NewDebugger(profile.DurationExpr, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"Alert": &models.Alert{}}))); err != nil {
+					log.Debugf("Error compiling debug duration_expr of %s : %s", profile.Name, err)
+				}
+				runtime.DebugDurationExpr = debugDurationExpr
+			}
+		}
+
+		for _, decision := range profile.Decisions {
+			if runtime.RuntimeDurationExpr == nil {
+				if _, err := time.ParseDuration(*decision.Duration); err != nil {
+					return []*Runtime{}, errors.Wrapf(err, "error parsing duration '%s' of %s", *decision.Duration, profile.Name)
+				}
+			}
+		}
+
+		profilesRuntime = append(profilesRuntime, runtime)
+	}
+	return profilesRuntime, nil
+}
+
+func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*models.Decision, error) {
+	var decisions []*models.Decision
+
+	for _, refDecision := range Profile.Cfg.Decisions {
+		decision := models.Decision{}
+		/*the reference decision from profile is in simulated mode */
+		if refDecision.Simulated != nil && *refDecision.Simulated {
+			decision.Simulated = new(bool)
+			*decision.Simulated = true
+			/*the event is already in simulation mode */
+		} else if Alert.Simulated != nil && *Alert.Simulated {
+			decision.Simulated = new(bool)
+			*decision.Simulated = true
+		}
+		/*If the profile specifies a scope, this will prevail.
+		If not, we're going to get the scope from the source itself*/
+		decision.Scope = new(string)
+		if refDecision.Scope != nil && *refDecision.Scope != "" {
+			*decision.Scope = *refDecision.Scope
+		} else {
+			*decision.Scope = *Alert.Source.Scope
+		}
+		/*some fields are populated from the reference object : duration, scope, type*/
+		decision.Duration = new(string)
+		if Profile.Cfg.DurationExpr != "" && Profile.RuntimeDurationExpr != nil {
+			duration, err := expr.Run(Profile.RuntimeDurationExpr, exprhelpers.GetExprEnv(map[string]interface{}{"Alert": Alert}))
+			if err != nil {
+				Profile.Logger.Warningf("Failed to run duration_expr : %v", err)
+				*decision.Duration = *refDecision.Duration
+			} else {
+				durationStr := fmt.Sprint(duration)
+				if _, err := time.ParseDuration(durationStr); err != nil {
+					Profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration)
+					*decision.Duration = *refDecision.Duration
+				} else {
+					*decision.Duration = durationStr
+				}
+			}
+		} else {
+			if refDecision.Duration == nil {
+				*decision.Duration = defaultDuration
+			} else {
+				*decision.Duration = *refDecision.Duration
+			}
+		}
+
+		decision.Type = new(string)
+		*decision.Type = *refDecision.Type
+
+		/*for the others, let's populate it from the alert and its source*/
+		decision.Value = new(string)
+		*decision.Value = *Alert.Source.Value
+		decision.Origin = new(string)
+		*decision.Origin = "crowdsec"
+		if refDecision.Origin != nil {
+			*decision.Origin = fmt.Sprintf("%s/%s", *decision.Origin, *refDecision.Origin)
+		}
+		decision.Scenario = new(string)
+		*decision.Scenario = *Alert.Scenario
+		decisions = append(decisions, &decision)
+	}
+	return decisions, nil
+}
+
+//EvaluateProfile is going to evaluate an Alert against a profile to generate Decisions
+func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision, bool, error) {
+	var decisions []*models.Decision
+
+	matched := false
+	for eIdx, expression := range Profile.RuntimeFilters {
+		output, err := expr.Run(expression, exprhelpers.GetExprEnv(map[string]interface{}{"Alert": Alert}))
+		if err != nil {
+			Profile.Logger.Warningf("failed to run profile filter expr : %v", err)
+			return nil, matched, errors.Wrapf(err, "while running expression %s", Profile.Cfg.Filters[eIdx])
+		}
+		switch out := output.(type) {
+		case bool:
+			if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug {
+				Profile.DebugFilters[eIdx].Run(Profile.Logger, out, exprhelpers.GetExprEnv(map[string]interface{}{"Alert": Alert}))
+			}
+			if out {
+				matched = true
+				/*the expression matched, create the associated decision*/
+				subdecisions, err := Profile.GenerateDecisionFromProfile(Alert)
+				if err != nil {
+					return nil, matched, errors.Wrapf(err, "while generating decision from profile %s", Profile.Cfg.Name)
+				}
+
+				decisions = append(decisions, subdecisions...)
+			} else {
+				Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name)
+				if Profile.Cfg.OnFailure == "break" {
+					break
+				}
+			}
+
+		default:
+			return nil, matched, fmt.Errorf("unexpected type %T (%v) while running '%s'", output, output, Profile.Cfg.Filters[eIdx])
+
+		}
+
+	}
+
+	return decisions, matched, nil
+}
diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go
new file mode 100644
index 0000000..d9a7570
--- /dev/null
+++ b/pkg/csprofiles/csprofiles_test.go
@@ -0,0 +1,202 @@
+package csprofiles
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+	"gotest.tools/v3/assert"
+)
+
+var (
+	scope     = "Country"
+	typ       = "ban"
+	boolFalse = false
+	boolTrue  = true
+	duration  = "1h"
+
+	value    = "CH"
+	scenario = "ssh-bf"
+)
+
+func TestNewProfile(t *testing.T) {
+	tests := []struct {
+		name              string
+		profileCfg        *csconfig.ProfileCfg
+		expectedNbProfile int
+	}{
+		{
+			name: "filter ok and duration_expr ok",
+			profileCfg: &csconfig.ProfileCfg{
+				Filters: []string{
+					"1==1",
+				},
+				DurationExpr: "1==1",
+				Debug:        &boolFalse,
+				Decisions: []models.Decision{
+					{Type: &typ, Scope: &scope, Simulated: &boolTrue, Duration: &duration},
+				},
+			},
+			expectedNbProfile: 1,
+		},
+		{
+			name: "filter NOK and duration_expr ok",
+			profileCfg: &csconfig.ProfileCfg{
+				Filters: []string{
+					"1==1",
+					"unknownExprHelper() == 'foo'",
+				},
+				DurationExpr: "1==1",
+				Debug:        &boolFalse,
+				Decisions: []models.Decision{
+					{Type: &typ, Scope: &scope, Simulated: &boolFalse, Duration: &duration},
+				},
+			},
+			expectedNbProfile: 0,
+		},
+		{
+			name: "filter ok and duration_expr NOK",
+			profileCfg: &csconfig.ProfileCfg{
+				Filters: []string{
+					"1==1",
+				},
+				DurationExpr: "unknownExprHelper() == 'foo'",
+				Debug:        &boolFalse,
+				Decisions: []models.Decision{
+					{Type: &typ, Scope: &scope, Simulated: &boolFalse, Duration: &duration},
+				},
+			},
+			expectedNbProfile: 0,
+		},
+		{
+			name: "filter ok and duration_expr ok + DEBUG",
+			profileCfg: &csconfig.ProfileCfg{
+				Filters: []string{
+					"1==1",
+				},
+				DurationExpr: "1==1",
+				Debug:        &boolTrue,
+				Decisions: []models.Decision{
+					{Type: &typ, Scope: &scope, Simulated: &boolFalse, Duration: &duration},
+				},
+			},
+			expectedNbProfile: 1,
+		},
+	}
+
+	for _, test := range tests {
+		test := test
+		t.Run(test.name, func(t *testing.T) {
+			profilesCfg := []*csconfig.ProfileCfg{
+				test.profileCfg,
+			}
+			profile, _ := NewProfile(profilesCfg)
+			fmt.Printf("expected : %+v | result : %+v", test.expectedNbProfile, len(profile))
+			assert.Equal(t, test.expectedNbProfile, len(profile))
+		})
+	}
+}
+
+func TestEvaluateProfile(t *testing.T) {
+	type args struct {
+		profileCfg *csconfig.ProfileCfg
+		Alert      *models.Alert
+	}
+	tests := []struct {
+		name                  string
+		args                  args
+		expectedDecisionCount int // count of expected decisions
+		expectedDuration      string
+		expectedMatchStatus   bool
+	}{
+		{
+			name: "simple pass
single expr", + args: args{ + profileCfg: &csconfig.ProfileCfg{ + Filters: []string{fmt.Sprintf("Alert.GetScenario() == \"%s\"", scenario)}, + Debug: &boolFalse, + }, + Alert: &models.Alert{Remediation: true, Scenario: &scenario}, + }, + expectedDecisionCount: 0, + expectedMatchStatus: true, + }, + { + name: "simple fail single expr", + args: args{ + profileCfg: &csconfig.ProfileCfg{ + Filters: []string{"Alert.GetScenario() == \"Foo\""}, + }, + Alert: &models.Alert{Remediation: true}, + }, + expectedDecisionCount: 0, + expectedMatchStatus: false, + }, + { + name: "1 expr fail 1 expr pass should still eval to match", + args: args{ + profileCfg: &csconfig.ProfileCfg{ + Filters: []string{"1==1", "1!=1"}, + }, + Alert: &models.Alert{Remediation: true}, + }, + expectedDecisionCount: 0, + expectedMatchStatus: true, + }, + { + name: "simple filter with 2 decision", + args: args{ + profileCfg: &csconfig.ProfileCfg{ + Filters: []string{"1==1"}, + Decisions: []models.Decision{ + {Type: &typ, Scope: &scope, Simulated: &boolTrue, Duration: &duration}, + {Type: &typ, Scope: &scope, Simulated: &boolFalse, Duration: &duration}, + }, + }, + Alert: &models.Alert{Remediation: true, Scenario: &scenario, Source: &models.Source{Value: &value}}, + }, + expectedDecisionCount: 2, + expectedMatchStatus: true, + }, + { + name: "simple filter with decision_expr", + args: args{ + profileCfg: &csconfig.ProfileCfg{ + Filters: []string{"1==1"}, + Decisions: []models.Decision{ + {Type: &typ, Scope: &scope, Simulated: &boolFalse}, + }, + DurationExpr: "Sprintf('%dh', 4*4)", + }, + Alert: &models.Alert{Remediation: true, Scenario: &scenario, Source: &models.Source{Value: &value}}, + }, + expectedDecisionCount: 1, + expectedDuration: "16h", + expectedMatchStatus: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + profilesCfg := []*csconfig.ProfileCfg{ + tt.args.profileCfg, + } + profile, err := NewProfile(profilesCfg) + if err != nil { + t.Errorf("failed to get newProfile : %+v", err) + } + got, got1, _ := profile[0].EvaluateProfile(tt.args.Alert) + if !reflect.DeepEqual(len(got), tt.expectedDecisionCount) { + t.Errorf("EvaluateProfile() got = %+v, want %+v", got, tt.expectedDecisionCount) + } + if got1 != tt.expectedMatchStatus { + t.Errorf("EvaluateProfile() got1 = %v, want %v", got1, tt.expectedMatchStatus) + } + if tt.expectedDuration != "" { + assert.Equal(t, tt.expectedDuration, *got[0].Duration, "The two durations should be the same") + } + }) + } +} diff --git a/pkg/cstest/filenotfound_unix.go b/pkg/cstest/filenotfound_unix.go new file mode 100644 index 0000000..a5a426d --- /dev/null +++ b/pkg/cstest/filenotfound_unix.go @@ -0,0 +1,5 @@ +//go:build unix + +package cstest + +const FileNotFoundMessage = "no such file or directory" diff --git a/pkg/cstest/filenotfound_windows.go b/pkg/cstest/filenotfound_windows.go new file mode 100644 index 0000000..31816c0 --- /dev/null +++ b/pkg/cstest/filenotfound_windows.go @@ -0,0 +1,5 @@ +//go:build windows + +package cstest + +const FileNotFoundMessage = "The system cannot find the file specified." 
diff --git a/pkg/cstest/utils.go b/pkg/cstest/utils.go
new file mode 100644
index 0000000..2c26be8
--- /dev/null
+++ b/pkg/cstest/utils.go
@@ -0,0 +1,30 @@
+package cstest
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func AssertErrorContains(t *testing.T, err error, expectedErr string) {
+	t.Helper()
+
+	if expectedErr != "" {
+		assert.ErrorContains(t, err, expectedErr)
+		return
+	}
+
+	assert.NoError(t, err)
+}
+
+func RequireErrorContains(t *testing.T, err error, expectedErr string) {
+	t.Helper()
+
+	if expectedErr != "" {
+		require.ErrorContains(t, err, expectedErr)
+		return
+	}
+
+	require.NoError(t, err)
+}
diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go
new file mode 100644
index 0000000..f48dc22
--- /dev/null
+++ b/pkg/cwhub/cwhub.go
@@ -0,0 +1,368 @@
+package cwhub
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/enescakir/emoji"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/mod/semver"
+)
+
+/*managed configuration types*/
+var PARSERS = "parsers"
+var PARSERS_OVFLW = "postoverflows"
+var SCENARIOS = "scenarios"
+var COLLECTIONS = "collections"
+var ItemTypes = []string{PARSERS, PARSERS_OVFLW, SCENARIOS, COLLECTIONS}
+
+var hubIdx map[string]map[string]Item
+
+var RawFileURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s"
+var HubBranch = "master"
+var HubIndexFile = ".index.json"
+
+type ItemVersion struct {
+	Digest     string `json:"digest,omitempty"`
+	Deprecated bool   `json:"deprecated,omitempty"`
+}
+
+type ItemHubStatus struct {
+	Name         string `json:"name"`
+	LocalVersion string `json:"local_version"`
+	LocalPath    string `json:"local_path"`
+	Description  string `json:"description"`
+	UTF8_Status  string `json:"utf8_status"`
+	Status       string `json:"status"`
+}
+
+//Item can be: a parser, a scenario or a collection
+type Item struct {
+	/*descriptive info*/
+	Type                 string   `yaml:"type,omitempty" json:"type,omitempty"`                                      //parser|postoverflows|scenario|collection(|enrich)
+	Stage                string   `json:"stage,omitempty" yaml:"stage,omitempty"`                                    //Stage for parser|postoverflow : s00-raw/s01-...
+	Name                 string   `json:"name,omitempty"`                                                            //as seen in .config.json, usually "author/name"
+	FileName             string   `json:"file_name,omitempty"`                                                       //the filename, ie. apache2-logs.yaml
+	Description          string   `yaml:"description,omitempty" json:"description,omitempty"`                       //as seen in .config.json
+	Author               string   `json:"author,omitempty"`                                                          //as seen in .config.json
+	References           []string `yaml:"references,omitempty" json:"references,omitempty"`                         //as seen in .config.json
+	BelongsToCollections []string `yaml:"belongs_to_collections,omitempty" json:"belongs_to_collections,omitempty"` /*if it's part of collections, track name here*/
+
+	/*remote (hub) infos*/
+	RemoteURL  string `yaml:"remoteURL,omitempty" json:"remoteURL,omitempty"` //the full remote uri of file in http
+	RemotePath string `json:"path,omitempty" yaml:"remote_path,omitempty"`    //the path relative to git, ie. /parsers/stage/author/file.yaml
+	RemoteHash string                 `yaml:"hash,omitempty" json:"hash,omitempty"` //the remote (hub) sha256 of the file
+	Version    string                 `json:"version,omitempty"`                    //the last version
+	Versions   map[string]ItemVersion `json:"versions,omitempty" yaml:"-"`          //the list of existing versions
+
+	/*local (deployed) infos*/
+	LocalPath string `yaml:"local_path,omitempty" json:"local_path,omitempty"` //the local path relative to ${CFG_DIR}
+	//LocalHubPath string
+	LocalVersion string `json:"local_version,omitempty"`
+	LocalHash    string `json:"local_hash,omitempty"` //the local sha256 of the file
+	Installed    bool   `json:"installed,omitempty"`
+	Downloaded   bool   `json:"downloaded,omitempty"`
+	UpToDate     bool   `json:"up_to_date,omitempty"`
+	Tainted      bool   `json:"tainted,omitempty"` //has it been locally modified
+	Local        bool   `json:"local,omitempty"`   //if it's a local, non-version-controlled item
+
+	/*if it's a collection, it's not a single file*/
+	Parsers       []string `yaml:"parsers,omitempty" json:"parsers,omitempty"`
+	PostOverflows []string `yaml:"postoverflows,omitempty" json:"postoverflows,omitempty"`
+	Scenarios     []string `yaml:"scenarios,omitempty" json:"scenarios,omitempty"`
+	Collections   []string `yaml:"collections,omitempty" json:"collections,omitempty"`
+}
+
+func (i *Item) toHubStatus() ItemHubStatus {
+	hubStatus := ItemHubStatus{}
+	hubStatus.Name = i.Name
+	hubStatus.LocalVersion = i.LocalVersion
+	hubStatus.LocalPath = i.LocalPath
+	hubStatus.Description = i.Description
+
+	status, ok, warning, managed := ItemStatus(*i)
+	hubStatus.Status = status
+	if !managed {
+		hubStatus.UTF8_Status = fmt.Sprintf("%v %s", emoji.House, status)
+	} else if !i.Installed {
+		hubStatus.UTF8_Status = fmt.Sprintf("%v %s", emoji.Prohibited, status)
+	} else if warning {
+		hubStatus.UTF8_Status = fmt.Sprintf("%v %s", emoji.Warning, status)
+	} else if ok {
+		hubStatus.UTF8_Status = fmt.Sprintf("%v %s", emoji.CheckMark, status)
+	}
+	return hubStatus
+}
+
+var skippedLocal = 0
+var skippedTainted = 0
+
+/*To be used when reference(s) (is/are) missing in a collection*/
+var ReferenceMissingError = errors.New("Reference(s) missing in collection")
+var MissingHubIndex = errors.New("hub index can't be found")
+
+//GetVersionStatus : semver requires 'v' prefix
+func GetVersionStatus(v *Item) int {
+	return semver.Compare("v"+v.Version, "v"+v.LocalVersion)
+}
+
+// calculate sha256 of a file
+func getSHA256(filepath string) (string, error) {
+	/* Digest of file */
+	f, err := os.Open(filepath)
+	if err != nil {
+		return "", fmt.Errorf("unable to open '%s' : %s", filepath, err)
+	}
+
+	defer f.Close()
+
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return "", fmt.Errorf("unable to calculate sha256 of '%s': %s", filepath, err)
+	}
+
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+func GetItemMap(itemType string) map[string]Item {
+	var m map[string]Item
+	var ok bool
+
+	if m, ok = hubIdx[itemType]; !ok {
+		return nil
+	}
+	return m
+}
+
+//GetItemByPath retrieves an item from hubIdx based on its path. To achieve this, it resolves the symlink to find the associated hub item.
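+//For example (illustrative paths), a symlink
+//	/etc/crowdsec/parsers/s01-parse/foobar.yaml -> ${hubdir}/parsers/s01-parse/crowdsecurity/foobar.yaml
+//resolves to the item name "crowdsecurity/foobar".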
+func GetItemByPath(itemType string, itemPath string) (*Item, error) {
+	/*try to resolve symlink*/
+	finalName := ""
+	f, err := os.Lstat(itemPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "while performing lstat on %s", itemPath)
+	}
+
+	if f.Mode()&os.ModeSymlink == 0 {
+		/*it's not a symlink, the filename itself is the key*/
+		finalName = filepath.Base(itemPath)
+	} else {
+		/*resolve the symlink to hub file*/
+		pathInHub, err := os.Readlink(itemPath)
+		if err != nil {
+			return nil, errors.Wrapf(err, "while reading symlink of %s", itemPath)
+		}
+		//extract author from path
+		fname := filepath.Base(pathInHub)
+		author := filepath.Base(filepath.Dir(pathInHub))
+		//trim yaml suffix
+		fname = strings.TrimSuffix(fname, ".yaml")
+		fname = strings.TrimSuffix(fname, ".yml")
+		finalName = fmt.Sprintf("%s/%s", author, fname)
+	}
+
+	/*look the item up in the index by its final name*/
+	if m := GetItemMap(itemType); m != nil {
+		if v, ok := m[finalName]; ok {
+			return &v, nil
+		}
+		return nil, fmt.Errorf("%s not found in %s", finalName, itemType)
+	}
+	return nil, fmt.Errorf("item type %s doesn't exist", itemType)
+}
+
+func GetItem(itemType string, itemName string) *Item {
+	if m, ok := GetItemMap(itemType)[itemName]; ok {
+		return &m
+	}
+	return nil
+}
+
+func AddItem(itemType string, item Item) error {
+	in := false
+	for _, itype := range ItemTypes {
+		if itype == itemType {
+			in = true
+		}
+	}
+	if !in {
+		return fmt.Errorf("ItemType %s is unknown", itemType)
+	}
+	hubIdx[itemType][item.Name] = item
+	return nil
+}
+
+func DisplaySummary() {
+	log.Printf("Loaded %d collections, %d parsers, %d scenarios, %d post-overflow parsers", len(hubIdx[COLLECTIONS]),
+		len(hubIdx[PARSERS]), len(hubIdx[SCENARIOS]), len(hubIdx[PARSERS_OVFLW]))
+	if skippedLocal > 0 || skippedTainted > 0 {
+		log.Printf("unmanaged items : %d local, %d tainted", skippedLocal, skippedTainted)
+	}
+}
+
+//returns: human-text, Enabled, Warning, Unmanaged
+func ItemStatus(v Item) (string, bool, bool, bool) {
+	strret := "disabled"
+	Ok := false
+	if v.Installed {
+		Ok = true
+		strret = "enabled"
+	}
+
+	Managed := true
+	if v.Local {
+		Managed = false
+		strret += ",local"
+	}
+
+	//tainted or out of date
+	Warning := false
+	if v.Tainted {
+		Warning = true
+		strret += ",tainted"
+	} else if !v.UpToDate && !v.Local {
+		strret += ",update-available"
+		Warning = true
+	}
+	return strret, Ok, Warning, Managed
+}
+
+func GetInstalledScenariosAsString() ([]string, error) {
+	var retStr []string
+
+	items, err := GetInstalledScenarios()
+	if err != nil {
+		return nil, errors.Wrap(err, "while fetching scenarios")
+	}
+	for _, it := range items {
+		retStr = append(retStr, it.Name)
+	}
+	return retStr, nil
+}
+
+func GetInstalledScenarios() ([]Item, error) {
+	var retItems []Item
+
+	if _, ok := hubIdx[SCENARIOS]; !ok {
+		return nil, fmt.Errorf("no scenarios in hubIdx")
+	}
+	for _, item := range hubIdx[SCENARIOS] {
+		if item.Installed {
+			retItems = append(retItems, item)
+		}
+	}
+	return retItems, nil
+}
+
+func GetInstalledParsers() ([]Item, error) {
+	var retItems []Item
+
+	if _, ok := hubIdx[PARSERS]; !ok {
+		return nil, fmt.Errorf("no parsers in hubIdx")
+	}
+	for _, item := range hubIdx[PARSERS] {
+		if item.Installed {
+			retItems = append(retItems, item)
+		}
+	}
+	return retItems, nil
+}
+
+func GetInstalledParsersAsString() ([]string, error) {
+	var retStr []string
+
+	items, err := GetInstalledParsers()
+	if err != nil {
+		return nil, errors.Wrap(err, "while fetching parsers")
+	}
+	for _, it := range
items { + retStr = append(retStr, it.Name) + } + return retStr, nil +} + +func GetInstalledPostOverflows() ([]Item, error) { + var retItems []Item + + if _, ok := hubIdx[PARSERS_OVFLW]; !ok { + return nil, fmt.Errorf("no post overflows in hubIdx") + } + for _, item := range hubIdx[PARSERS_OVFLW] { + if item.Installed { + retItems = append(retItems, item) + } + } + return retItems, nil +} + +func GetInstalledPostOverflowsAsString() ([]string, error) { + var retStr []string + + items, err := GetInstalledPostOverflows() + if err != nil { + return nil, errors.Wrap(err, "while fetching post overflows") + } + for _, it := range items { + retStr = append(retStr, it.Name) + } + return retStr, nil +} + +func GetInstalledCollectionsAsString() ([]string, error) { + var retStr []string + + items, err := GetInstalledCollections() + if err != nil { + return nil, errors.Wrap(err, "while fetching collections") + } + for _, it := range items { + retStr = append(retStr, it.Name) + } + return retStr, nil +} + +func GetInstalledCollections() ([]Item, error) { + var retItems []Item + + if _, ok := hubIdx[COLLECTIONS]; !ok { + return nil, fmt.Errorf("no collection in hubIdx") + } + for _, item := range hubIdx[COLLECTIONS] { + if item.Installed { + retItems = append(retItems, item) + } + } + return retItems, nil +} + +//Returns a list of entries for packages : name, status, local_path, local_version, utf8_status (fancy) +func GetHubStatusForItemType(itemType string, name string, all bool) []ItemHubStatus { + if _, ok := hubIdx[itemType]; !ok { + log.Errorf("type %s doesn't exist", itemType) + + return nil + } + + var ret = make([]ItemHubStatus, 0) + /*remember, you do it for the user :)*/ + for _, item := range hubIdx[itemType] { + if name != "" && name != item.Name { + //user has requested a specific name + continue + } + //Only enabled items ? 
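+		//(when 'all' is false, only items that are actually installed are listed)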
+ if !all && !item.Installed { + continue + } + //Check the item status + ret = append(ret, item.toHubStatus()) + } + sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) + return ret +} diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go new file mode 100644 index 0000000..6aa9431 --- /dev/null +++ b/pkg/cwhub/cwhub_test.go @@ -0,0 +1,427 @@ +package cwhub + +import ( + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + log "github.com/sirupsen/logrus" +) + +/* + To test : + - Download 'first' hub index + - Update hub index + - Install collection + list content + - Taint existing parser + list + - Upgrade collection +*/ + +var responseByPath map[string]string + +func TestItemStatus(t *testing.T) { + cfg := test_prepenv() + + err := UpdateHubIdx(cfg.Hub) + //DownloadHubIdx() + if err != nil { + t.Fatalf("failed to download index : %s", err) + } + if err := GetHubIdx(cfg.Hub); err != nil { + t.Fatalf("failed to load hub index : %s", err) + } + + //get existing map + x := GetItemMap(COLLECTIONS) + if len(x) == 0 { + t.Fatalf("expected non empty result") + } + + //Get item : good and bad + for k := range x { + item := GetItem(COLLECTIONS, k) + if item == nil { + t.Fatalf("expected item") + } + item.Installed = true + item.UpToDate = false + item.Local = false + item.Tainted = false + txt, _, _, _ := ItemStatus(*item) + if txt != "enabled,update-available" { + log.Fatalf("got '%s'", txt) + } + + item.Installed = false + item.UpToDate = false + item.Local = true + item.Tainted = false + txt, _, _, _ = ItemStatus(*item) + if txt != "disabled,local" { + log.Fatalf("got '%s'", txt) + } + + break + } + DisplaySummary() +} + +func TestGetters(t *testing.T) { + cfg := test_prepenv() + + err := UpdateHubIdx(cfg.Hub) + //DownloadHubIdx() + if err != nil { + t.Fatalf("failed to download index : %s", err) + } + if err := GetHubIdx(cfg.Hub); err != nil { + t.Fatalf("failed to load hub index : %s", err) + } + + //get non existing map + empty := GetItemMap("ratata") + if empty != nil { + t.Fatalf("expected nil result") + } + //get existing map + x := GetItemMap(COLLECTIONS) + if len(x) == 0 { + t.Fatalf("expected non empty result") + } + + //Get item : good and bad + for k := range x { + empty := GetItem(COLLECTIONS, k+"nope") + if empty != nil { + t.Fatalf("expected empty item") + } + + item := GetItem(COLLECTIONS, k) + if item == nil { + t.Fatalf("expected non empty item") + } + + //Add item and get it + item.Name += "nope" + if err := AddItem(COLLECTIONS, *item); err != nil { + t.Fatalf("didn't expect error : %s", err) + } + + newitem := GetItem(COLLECTIONS, item.Name) + if newitem == nil { + t.Fatalf("expected non empty item") + } + + //Add bad item + if err := AddItem("ratata", *item); err != nil { + if fmt.Sprintf("%s", err) != "ItemType ratata is unknown" { + t.Fatalf("unexpected error") + } + } else { + t.Fatalf("Expected error") + } + + break + } + +} + +func TestIndexDownload(t *testing.T) { + cfg := test_prepenv() + + err := UpdateHubIdx(cfg.Hub) + //DownloadHubIdx() + if err != nil { + t.Fatalf("failed to download index : %s", err) + } + if err := GetHubIdx(cfg.Hub); err != nil { + t.Fatalf("failed to load hub index : %s", err) + } +} + +func getTestCfg() (cfg *csconfig.Config) { + cfg = &csconfig.Config{Hub: &csconfig.Hub{}} + cfg.Hub.ConfigDir, _ = filepath.Abs("./install") + cfg.Hub.HubDir, _ = filepath.Abs("./hubdir") + cfg.Hub.HubIndexFile = filepath.Clean("./hubdir/.index.json") + 
return +} + +func test_prepenv() *csconfig.Config { + resetResponseByPath() + log.SetLevel(log.DebugLevel) + cfg := getTestCfg() + //Mock the http client + http.DefaultClient.Transport = newMockTransport() + + if err := os.RemoveAll(cfg.Hub.ConfigDir); err != nil { + log.Fatalf("failed to remove %s : %s", cfg.Hub.ConfigDir, err) + } + + if err := os.MkdirAll(cfg.Hub.ConfigDir, 0700); err != nil { + log.Fatalf("mkdir : %s", err) + } + + if err := os.RemoveAll(cfg.Hub.HubDir); err != nil { + log.Fatalf("failed to remove %s : %s", cfg.Hub.HubDir, err) + } + if err := os.MkdirAll(cfg.Hub.HubDir, 0700); err != nil { + log.Fatalf("failed to mkdir %s : %s", cfg.Hub.HubDir, err) + } + + if err := UpdateHubIdx(cfg.Hub); err != nil { + log.Fatalf("failed to download index : %s", err) + } + + // if err := os.RemoveAll(cfg.Hub.InstallDir); err != nil { + // log.Fatalf("failed to remove %s : %s", cfg.Hub.InstallDir, err) + // } + // if err := os.MkdirAll(cfg.Hub.InstallDir, 0700); err != nil { + // log.Fatalf("failed to mkdir %s : %s", cfg.Hub.InstallDir, err) + // } + return cfg + +} + +func testInstallItem(cfg *csconfig.Hub, t *testing.T, item Item) { + + //Install the parser + item, err := DownloadLatest(cfg, item, false, false) + if err != nil { + t.Fatalf("error while downloading %s : %v", item.Name, err) + } + if err, _ := LocalSync(cfg); err != nil { + t.Fatalf("taint: failed to run localSync : %s", err) + } + if !hubIdx[item.Type][item.Name].UpToDate { + t.Fatalf("download: %s should be up-to-date", item.Name) + } + if hubIdx[item.Type][item.Name].Installed { + t.Fatalf("download: %s should not be installed", item.Name) + } + if hubIdx[item.Type][item.Name].Tainted { + t.Fatalf("download: %s should not be tainted", item.Name) + } + + item, err = EnableItem(cfg, item) + if err != nil { + t.Fatalf("error while enabling %s : %v.", item.Name, err) + } + if err, _ := LocalSync(cfg); err != nil { + t.Fatalf("taint: failed to run localSync : %s", err) + } + if !hubIdx[item.Type][item.Name].Installed { + t.Fatalf("install: %s should be installed", item.Name) + } +} + +func testTaintItem(cfg *csconfig.Hub, t *testing.T, item Item) { + if hubIdx[item.Type][item.Name].Tainted { + t.Fatalf("pre-taint: %s should not be tainted", item.Name) + } + f, err := os.OpenFile(item.LocalPath, os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + t.Fatalf("(taint) opening %s (%s) : %s", item.LocalPath, item.Name, err) + } + defer f.Close() + + if _, err = f.WriteString("tainted"); err != nil { + t.Fatalf("tainting %s : %s", item.Name, err) + } + //Local sync and check status + if err, _ := LocalSync(cfg); err != nil { + t.Fatalf("taint: failed to run localSync : %s", err) + } + if !hubIdx[item.Type][item.Name].Tainted { + t.Fatalf("taint: %s should be tainted", item.Name) + } +} + +func testUpdateItem(cfg *csconfig.Hub, t *testing.T, item Item) { + + if hubIdx[item.Type][item.Name].UpToDate { + t.Fatalf("update: %s should NOT be up-to-date", item.Name) + } + //Update it + check status + item, err := DownloadLatest(cfg, item, true, true) + if err != nil { + t.Fatalf("failed to update %s : %s", item.Name, err) + } + //Local sync and check status + if err, _ := LocalSync(cfg); err != nil { + t.Fatalf("failed to run localSync : %s", err) + } + if !hubIdx[item.Type][item.Name].UpToDate { + t.Fatalf("update: %s should be up-to-date", item.Name) + } + if hubIdx[item.Type][item.Name].Tainted { + t.Fatalf("update: %s should not be tainted anymore", item.Name) + } +} + +func testDisableItem(cfg *csconfig.Hub, t *testing.T, item 
Item) { + if !item.Installed { + t.Fatalf("disable: %s should be installed", item.Name) + } + //Remove + item, err := DisableItem(cfg, item, false, false) + if err != nil { + t.Fatalf("failed to disable item : %v", err) + } + //Local sync and check status + if err, warns := LocalSync(cfg); err != nil || len(warns) > 0 { + t.Fatalf("failed to run localSync : %s (%+v)", err, warns) + } + if hubIdx[item.Type][item.Name].Tainted { + t.Fatalf("disable: %s should not be tainted anymore", item.Name) + } + if hubIdx[item.Type][item.Name].Installed { + t.Fatalf("disable: %s should not be installed anymore", item.Name) + } + if !hubIdx[item.Type][item.Name].Downloaded { + t.Fatalf("disable: %s should still be downloaded", item.Name) + } + //Purge + item, err = DisableItem(cfg, item, true, false) + if err != nil { + t.Fatalf("failed to purge item : %v", err) + } + //Local sync and check status + if err, warns := LocalSync(cfg); err != nil || len(warns) > 0 { + t.Fatalf("failed to run localSync : %s (%+v)", err, warns) + } + if hubIdx[item.Type][item.Name].Installed { + t.Fatalf("disable: %s should not be installed anymore", item.Name) + } + if hubIdx[item.Type][item.Name].Downloaded { + t.Fatalf("disable: %s should not be downloaded", item.Name) + } +} + +func TestInstallParser(t *testing.T) { + + /* + - install a random parser + - check its status + - taint it + - check its status + - force update it + - check its status + - remove it + */ + cfg := test_prepenv() + + getHubIdxOrFail(t) + //map iteration is random by itself + for _, it := range hubIdx[PARSERS] { + testInstallItem(cfg.Hub, t, it) + it = hubIdx[PARSERS][it.Name] + _ = GetHubStatusForItemType(PARSERS, it.Name, false) + testTaintItem(cfg.Hub, t, it) + it = hubIdx[PARSERS][it.Name] + _ = GetHubStatusForItemType(PARSERS, it.Name, false) + testUpdateItem(cfg.Hub, t, it) + it = hubIdx[PARSERS][it.Name] + testDisableItem(cfg.Hub, t, it) + it = hubIdx[PARSERS][it.Name] + + break + } +} + +func TestInstallCollection(t *testing.T) { + + /* + - install a random parser + - check its status + - taint it + - check its status + - force update it + - check its status + - remove it + */ + cfg := test_prepenv() + + getHubIdxOrFail(t) + //map iteration is random by itself + for _, it := range hubIdx[COLLECTIONS] { + testInstallItem(cfg.Hub, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testTaintItem(cfg.Hub, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testUpdateItem(cfg.Hub, t, it) + it = hubIdx[COLLECTIONS][it.Name] + testDisableItem(cfg.Hub, t, it) + + it = hubIdx[COLLECTIONS][it.Name] + x := GetHubStatusForItemType(COLLECTIONS, it.Name, false) + log.Printf("%+v", x) + break + } +} + +type mockTransport struct{} + +func newMockTransport() http.RoundTripper { + return &mockTransport{} +} + +// Implement http.RoundTripper +func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // Create mocked http.Response + response := &http.Response{ + Header: make(http.Header), + Request: req, + StatusCode: http.StatusOK, + } + response.Header.Set("Content-Type", "application/json") + responseBody := "" + log.Printf("---> %s", req.URL.Path) + + /*FAKE PARSER*/ + if resp, ok := responseByPath[req.URL.Path]; ok { + responseBody = resp + } else { + log.Fatalf("unexpected url :/ %s", req.URL.Path) + } + + response.Body = io.NopCloser(strings.NewReader(responseBody)) + return response, nil +} + +func fileToStringX(path string) string { + if f, err := os.Open(path); err == nil { + defer f.Close() + if data, err := io.ReadAll(f); err == nil { + 
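+			// normalize line endings so test fixtures read (and hash) identically
+			// whether or not git converted them to CRLF on checkout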
return strings.ReplaceAll(string(data), "\r\n", "\n") + } else { + panic(err) + } + } else { + panic(err) + } +} + +func resetResponseByPath() { + responseByPath = map[string]string{ + "/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./tests/foobar_parser.yaml"), + "/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./tests/foobar_parser.yaml"), + "/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./tests/collection_v1.yaml"), + "/master/.index.json": fileToStringX("./tests/index1.json"), + "/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true +name: crowdsecurity/foobar_scenario`, + "/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true +name: crowdsecurity/foobar_scenario`, + "/master/collections/crowdsecurity/foobar_subcollection.yaml": ` +blah: blalala +qwe: jejwejejw`, + "/master/collections/crowdsecurity/foobar.yaml": ` +blah: blalala +qwe: jejwejejw`, + } +} diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go new file mode 100644 index 0000000..5f5ccf9 --- /dev/null +++ b/pkg/cwhub/download.go @@ -0,0 +1,277 @@ +package cwhub + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +func UpdateHubIdx(hub *csconfig.Hub) error { + + bidx, err := DownloadHubIdx(hub) + if err != nil { + return errors.Wrap(err, "failed to download index") + } + ret, err := LoadPkgIndex(bidx) + if err != nil { + if !errors.Is(err, ReferenceMissingError) { + return errors.Wrap(err, "failed to read index") + } + } + hubIdx = ret + if err, _ := LocalSync(hub); err != nil { + return errors.Wrap(err, "failed to sync") + } + return nil +} + +func DownloadHubIdx(hub *csconfig.Hub) ([]byte, error) { + log.Debugf("fetching index from branch %s (%s)", HubBranch, fmt.Sprintf(RawFileURLTemplate, HubBranch, HubIndexFile)) + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf(RawFileURLTemplate, HubBranch, HubIndexFile), nil) + if err != nil { + return nil, errors.Wrap(err, "failed to build request for hub index") + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, errors.Wrap(err, "failed http request for hub index") + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("bad http code %d while requesting %s", resp.StatusCode, req.URL.String()) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "failed to read request answer for hub index") + } + + oldContent, err := os.ReadFile(hub.HubIndexFile) + if err != nil { + if !os.IsNotExist(err) { + log.Warningf("failed to read hub index: %s", err) + } + } else if bytes.Equal(body, oldContent) { + log.Info("hub index is up to date") + // write it anyway, can't hurt + } + + file, err := os.OpenFile(hub.HubIndexFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + + if err != nil { + return nil, errors.Wrap(err, "while opening hub index file") + } + defer file.Close() + + wsize, err := file.WriteString(string(body)) + if err != nil { + return nil, errors.Wrap(err, "while writing hub index file") + } + log.Infof("Wrote new %d bytes index to %s", wsize, hub.HubIndexFile) + return body, nil +} + +//DownloadLatest will download the latest version of Item to the tdir directory +func DownloadLatest(hub *csconfig.Hub, target 
Item, overwrite bool, updateOnly bool) (Item, error) { + var err error + + log.Debugf("Downloading %s %s", target.Type, target.Name) + if target.Type != COLLECTIONS { + if !target.Installed && updateOnly && target.Downloaded { + log.Debugf("skipping upgrade of %s : not installed", target.Name) + return target, nil + } + return DownloadItem(hub, target, overwrite) + } + + // collection + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + val, ok := hubIdx[ptrtype][p] + if !ok { + return target, fmt.Errorf("required %s %s of %s doesn't exist, abort", ptrtype, p, target.Name) + } + + if !val.Installed && updateOnly && val.Downloaded { + log.Debugf("skipping upgrade of %s : not installed", target.Name) + continue + } + + log.Debugf("Download %s sub-item : %s %s (%t -> %t)", target.Name, ptrtype, p, target.Installed, updateOnly) + //recurse as it's a collection + if ptrtype == COLLECTIONS { + log.Tracef("collection, recurse") + hubIdx[ptrtype][p], err = DownloadLatest(hub, val, overwrite, updateOnly) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name)) + } + } + item, err := DownloadItem(hub, val, overwrite) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", val.Name)) + } + + // We need to enable an item when it has been added to a collection since latest release of the collection. + // We check if val.Downloaded is false because maybe the item has been disabled by the user. + if !item.Installed && !val.Downloaded { + if item, err = EnableItem(hub, item); err != nil { + return target, errors.Wrapf(err, "enabling '%s'", item.Name) + } + } + hubIdx[ptrtype][p] = item + } + } + target, err = DownloadItem(hub, target, overwrite) + if err != nil { + return target, fmt.Errorf("failed to download item : %s", err) + } + return target, nil +} + +func DownloadItem(hub *csconfig.Hub, target Item, overwrite bool) (Item, error) { + var tdir = hub.HubDir + var dataFolder = hub.DataDir + /*if user didn't --force, don't overwrite local, tainted, up-to-date files*/ + if !overwrite { + if target.Tainted { + log.Debugf("%s : tainted, not updated", target.Name) + return target, nil + } + if target.UpToDate { + log.Debugf("%s : up-to-date, not updated", target.Name) + // We still have to check if data files are present + } + } + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf(RawFileURLTemplate, HubBranch, target.RemotePath), nil) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", req.URL.String())) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while downloading %s", req.URL.String())) + } + if resp.StatusCode != http.StatusOK { + return target, fmt.Errorf("bad http code %d for %s", resp.StatusCode, req.URL.String()) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while reading %s", req.URL.String())) + } + h := sha256.New() + if _, err := h.Write(body); err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while hashing %s", target.Name)) + } + meow := fmt.Sprintf("%x", h.Sum(nil)) + if meow != target.Versions[target.Version].Digest { + log.Errorf("Downloaded version doesn't match index, please 'hub update'") + log.Debugf("got %s, expected %s", meow, target.Versions[target.Version].Digest) + return target, 
fmt.Errorf("invalid download hash for %s", target.Name) + } + //all good, install + //check if parent dir exists + tmpdirs := strings.Split(tdir+"/"+target.RemotePath, "/") + parent_dir := strings.Join(tmpdirs[:len(tmpdirs)-1], "/") + + /*ensure that target file is within target dir*/ + finalPath, err := filepath.Abs(tdir + "/" + target.RemotePath) + if err != nil { + return target, errors.Wrapf(err, "Abs error on %s", tdir+"/"+target.RemotePath) + } + if !strings.HasPrefix(finalPath, tdir) { + return target, fmt.Errorf("path %s escapes %s, abort", target.RemotePath, tdir) + } + /*check dir*/ + if _, err = os.Stat(parent_dir); os.IsNotExist(err) { + log.Debugf("%s doesn't exist, create", parent_dir) + if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { + return target, errors.Wrap(err, "while creating parent directories") + } + } + /*check actual file*/ + if _, err = os.Stat(finalPath); !os.IsNotExist(err) { + log.Warningf("%s : overwrite", target.Name) + log.Debugf("target: %s/%s", tdir, target.RemotePath) + } else { + log.Infof("%s : OK", target.Name) + } + + f, err := os.OpenFile(tdir+"/"+target.RemotePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return target, errors.Wrap(err, "while opening file") + } + defer f.Close() + _, err = f.WriteString(string(body)) + if err != nil { + return target, errors.Wrap(err, "while writing file") + } + target.Downloaded = true + target.Tainted = false + target.UpToDate = true + + if err = downloadData(dataFolder, overwrite, bytes.NewReader(body)); err != nil { + return target, errors.Wrapf(err, "while downloading data for %s", target.FileName) + } + + hubIdx[target.Type][target.Name] = target + return target, nil +} + +func DownloadDataIfNeeded(hub *csconfig.Hub, target Item, force bool) error { + var ( + dataFolder = hub.DataDir + itemFile *os.File + err error + ) + itemFilePath := fmt.Sprintf("%s/%s/%s/%s", hub.ConfigDir, target.Type, target.Stage, target.FileName) + if itemFile, err = os.Open(itemFilePath); err != nil { + return errors.Wrapf(err, "while opening %s", itemFilePath) + } + defer itemFile.Close() + if err = downloadData(dataFolder, force, itemFile); err != nil { + return errors.Wrapf(err, "while downloading data for %s", itemFilePath) + } + return nil +} + +func downloadData(dataFolder string, force bool, reader io.Reader) error { + var err error + dec := yaml.NewDecoder(reader) + + for { + data := &types.DataSet{} + err = dec.Decode(data) + if err != nil { + if err != io.EOF { + return errors.Wrap(err, "while reading file") + } + break + } + + download := false + for _, dataS := range data.Data { + if _, err := os.Stat(path.Join(dataFolder, dataS.DestPath)); os.IsNotExist(err) { + download = true + } + } + if download || force { + err = types.GetData(data.Data, dataFolder) + if err != nil { + return errors.Wrap(err, "while getting data") + } + } + } + return nil +} diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go new file mode 100644 index 0000000..156c413 --- /dev/null +++ b/pkg/cwhub/download_test.go @@ -0,0 +1,42 @@ +package cwhub + +import ( + "fmt" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + log "github.com/sirupsen/logrus" +) + +func TestDownloadHubIdx(t *testing.T) { + back := RawFileURLTemplate + //bad url template + fmt.Println("Test 'bad URL'") + RawFileURLTemplate = "x" + ret, err := DownloadHubIdx(&csconfig.Hub{}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "failed to build request for hub index: parse ") { + 
log.Errorf("unexpected error %s", err) + } + fmt.Printf("->%+v", ret) + + //bad domain + fmt.Println("Test 'bad domain'") + RawFileURLTemplate = "https://baddomain/%s/%s" + ret, err = DownloadHubIdx(&csconfig.Hub{}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "failed http request for hub index: Get") { + log.Errorf("unexpected error %s", err) + } + fmt.Printf("->%+v", ret) + + //bad target path + fmt.Println("Test 'bad target path'") + RawFileURLTemplate = back + ret, err = DownloadHubIdx(&csconfig.Hub{HubIndexFile: "/does/not/exist/index.json"}) + if err == nil || !strings.HasPrefix(fmt.Sprintf("%s", err), "while opening hub index file: open /does/not/exist/index.json:") { + log.Errorf("unexpected error %s", err) + } + + RawFileURLTemplate = back + fmt.Printf("->%+v", ret) +} diff --git a/pkg/cwhub/helpers.go b/pkg/cwhub/helpers.go new file mode 100644 index 0000000..31f6008 --- /dev/null +++ b/pkg/cwhub/helpers.go @@ -0,0 +1,225 @@ +package cwhub + +import ( + "fmt" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/enescakir/emoji" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/mod/semver" +) + +// pick a hub branch corresponding to the current crowdsec version. +func chooseHubBranch() (string, error) { + latest, err := cwversion.Latest() + if err != nil { + return "master", err + } + + csVersion := cwversion.VersionStrip() + if csVersion == latest { + return "master", nil + } + + // if current version is greater than the latest we are in pre-release + if semver.Compare(csVersion, latest) == 1 { + log.Debugf("Your current crowdsec version seems to be a pre-release (%s)", csVersion) + return "master", nil + } + + if csVersion == "" { + log.Warning("Crowdsec version is not set, using master branch for the hub") + return "master", nil + } + + log.Warnf("Crowdsec is not the latest version. "+ + "Current version is '%s' and the latest stable version is '%s'. Please update it!", + csVersion, latest) + log.Warnf("As a result, you will not be able to use parsers/scenarios/collections "+ + "added to Crowdsec Hub after CrowdSec %s", latest) + return csVersion, nil +} + +// SetHubBranch sets the package variable that points to the hub branch. 
+func SetHubBranch() error { + // a branch is already set, or specified from the flags + if HubBranch != "" { + return nil + } + + // use the branch corresponding to the crowdsec version + branch, err := chooseHubBranch() + if err != nil { + return err + } + HubBranch = branch + log.Debugf("Using branch '%s' for the hub", HubBranch) + return nil +} + +func InstallItem(csConfig *csconfig.Config, name string, obtype string, force bool, downloadOnly bool) error { + it := GetItem(obtype, name) + if it == nil { + return fmt.Errorf("unable to retrieve item: %s", name) + } + + item := *it + if downloadOnly && item.Downloaded && item.UpToDate { + log.Warningf("%s is already downloaded and up-to-date", item.Name) + if !force { + return nil + } + } + + item, err := DownloadLatest(csConfig.Hub, item, force, true) + if err != nil { + return errors.Wrapf(err, "while downloading %s", item.Name) + } + + if err := AddItem(obtype, item); err != nil { + return errors.Wrapf(err, "while adding %s", item.Name) + } + + if downloadOnly { + log.Infof("Downloaded %s to %s", item.Name, filepath.Join(csConfig.Hub.HubDir, item.RemotePath)) + return nil + } + + item, err = EnableItem(csConfig.Hub, item) + if err != nil { + return errors.Wrapf(err, "while enabling %s", item.Name) + } + + if err := AddItem(obtype, item); err != nil { + return errors.Wrapf(err, "while adding %s", item.Name) + } + + log.Infof("Enabled %s", item.Name) + + return nil +} + +// XXX this must return errors instead of log.Fatal +func RemoveMany(csConfig *csconfig.Config, itemType string, name string, all bool, purge bool, forceAction bool) { + var ( + err error + disabled int + ) + + if name != "" { + it := GetItem(itemType, name) + if it == nil { + log.Fatalf("unable to retrieve: %s", name) + } + + item := *it + item, err = DisableItem(csConfig.Hub, item, purge, forceAction) + if err != nil { + log.Fatalf("unable to disable %s : %v", item.Name, err) + } + + if err := AddItem(itemType, item); err != nil { + log.Fatalf("unable to add %s: %v", item.Name, err) + } + return + } + + if !all { + log.Fatal("removing item: no item specified") + } + + // remove all + for _, v := range GetItemMap(itemType) { + if !v.Installed { + continue + } + v, err = DisableItem(csConfig.Hub, v, purge, forceAction) + if err != nil { + log.Fatalf("unable to disable %s : %v", v.Name, err) + } + + if err := AddItem(itemType, v); err != nil { + log.Fatalf("unable to add %s: %v", v.Name, err) + } + disabled++ + } + log.Infof("Disabled %d items", disabled) +} + +func UpgradeConfig(csConfig *csconfig.Config, itemType string, name string, force bool) { + var ( + err error + updated int + found bool + ) + + for _, v := range GetItemMap(itemType) { + if name != "" && name != v.Name { + continue + } + + if !v.Installed { + log.Tracef("skip %s, not installed", v.Name) + continue + } + + if !v.Downloaded { + log.Warningf("%s : not downloaded, please install.", v.Name) + continue + } + + found = true + + if v.UpToDate { + log.Infof("%s : up-to-date", v.Name) + + if err = DownloadDataIfNeeded(csConfig.Hub, v, force); err != nil { + log.Fatalf("%s : download failed : %v", v.Name, err) + } + + if !force { + continue + } + } + + v, err = DownloadLatest(csConfig.Hub, v, force, true) + if err != nil { + log.Fatalf("%s : download failed : %v", v.Name, err) + } + + if !v.UpToDate { + if v.Tainted { + log.Infof("%v %s is tainted, --force to overwrite", emoji.Warning, v.Name) + } else if v.Local { + log.Infof("%v %s is local", emoji.Prohibited, v.Name) + } + } else { + // this is used while 
scripting to know if the hub has been upgraded
+			// and a configuration reload is required
+			fmt.Printf("updated %s\n", v.Name)
+			log.Infof("%v %s : updated", emoji.Package, v.Name)
+			updated++
+		}
+
+		if err := AddItem(itemType, v); err != nil {
+			log.Fatalf("unable to add %s: %v", v.Name, err)
+		}
+	}
+
+	if !found && name == "" {
+		log.Infof("No %s installed, nothing to upgrade", itemType)
+	} else if !found {
+		log.Errorf("Item '%s' not found in hub", name)
+	} else if updated == 0 && found {
+		if name == "" {
+			log.Infof("All %s are already up-to-date", itemType)
+		} else {
+			log.Infof("Item '%s' is up-to-date", name)
+		}
+	} else if updated != 0 {
+		log.Infof("Upgraded %d items", updated)
+	}
+}
diff --git a/pkg/cwhub/helpers_test.go b/pkg/cwhub/helpers_test.go
new file mode 100644
index 0000000..4cb1165
--- /dev/null
+++ b/pkg/cwhub/helpers_test.go
@@ -0,0 +1,158 @@
+package cwhub
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+//Download index, install collection. Add scenario to collection (hub-side), update index, upgrade collection
+// We expect the new scenario to be installed
+func TestUpgradeConfigNewScenarioInCollection(t *testing.T) {
+	cfg := test_prepenv()
+
+	// fresh install of collection
+	getHubIdxOrFail(t)
+
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+
+	require.NoError(t, InstallItem(cfg, "crowdsecurity/test_collection", COLLECTIONS, false, false))
+
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+
+	// This is the scenario that gets added in the next version of the collection
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/barfoo_scenario"].Downloaded)
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/barfoo_scenario"].Installed)
+
+	assertCollectionDepsInstalled(t, "crowdsecurity/test_collection")
+
+	// collection receives an update. It now adds new scenario "crowdsecurity/barfoo_scenario"
+	pushUpdateToCollectionInHub()
+
+	if err := UpdateHubIdx(cfg.Hub); err != nil {
+		t.Fatalf("failed to download index : %s", err)
+	}
+	getHubIdxOrFail(t)
+
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+
+	UpgradeConfig(cfg, COLLECTIONS, "crowdsecurity/test_collection", false)
+	assertCollectionDepsInstalled(t, "crowdsecurity/test_collection")
+
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/barfoo_scenario"].Downloaded)
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/barfoo_scenario"].Installed)
+
+}
+
+// Install a collection, disable a scenario.
+// Upgrade should not enable/download the disabled scenario.
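+// ("Disabling" here means RemoveMany without purge; the scenario is no longer
+// installed and the parent collection becomes tainted, as asserted below.)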
+
+// Install a collection, then disable one of its scenarios.
+// Upgrade should not enable/download the disabled scenario.
+func TestUpgradeConfigInDisabledScenarioShouldNotBeInstalled(t *testing.T) {
+	cfg := test_prepenv()
+
+	// fresh install of collection
+	getHubIdxOrFail(t)
+
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+
+	require.NoError(t, InstallItem(cfg, "crowdsecurity/test_collection", COLLECTIONS, false, false))
+
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	assertCollectionDepsInstalled(t, "crowdsecurity/test_collection")
+
+	RemoveMany(cfg, SCENARIOS, "crowdsecurity/foobar_scenario", false, false, false)
+	getHubIdxOrFail(t)
+	// a scenario referenced by the collection was disabled, hence the collection should be tainted
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+
+	if err := UpdateHubIdx(cfg.Hub); err != nil {
+		t.Fatalf("failed to download index : %s", err)
+	}
+
+	UpgradeConfig(cfg, COLLECTIONS, "crowdsecurity/test_collection", false)
+
+	getHubIdxOrFail(t)
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+}
+
+func getHubIdxOrFail(t *testing.T) {
+	t.Helper()
+	if err := GetHubIdx(getTestCfg().Hub); err != nil {
+		t.Fatalf("failed to load hub index : %s", err)
+	}
+}
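+
+// A reference sketch for RemoveMany's boolean knobs (illustrative only; the
+// semantics follow from RemoveMany/DisableItem earlier in this patch): name
+// selects a single item, and "" with all=true targets every installed item of
+// the type; purge also deletes the downloaded hub file; forceAction overrides
+// the taint protection.
+func removeManySketch() {
+	cfg := test_prepenv()
+
+	// disable one scenario, keeping its downloaded file in the hub dir
+	RemoveMany(cfg, SCENARIOS, "crowdsecurity/foobar_scenario", false, false, false)
+
+	// disable and purge every installed scenario, even tainted ones
+	RemoveMany(cfg, SCENARIOS, "", true, true, true)
+}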
+
+// Install a collection and disable a referenced scenario. Then publish a new
+// version of the collection that adds a new scenario.
+// Upgrade should not enable/download the disabled scenario,
+// but it should install and enable the newly added one.
+func TestUpgradeConfigNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *testing.T) {
+	cfg := test_prepenv()
+
+	// fresh install of collection
+	getHubIdxOrFail(t)
+
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+
+	require.NoError(t, InstallItem(cfg, "crowdsecurity/test_collection", COLLECTIONS, false, false))
+
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+	require.False(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	assertCollectionDepsInstalled(t, "crowdsecurity/test_collection")
+
+	RemoveMany(cfg, SCENARIOS, "crowdsecurity/foobar_scenario", false, false, false)
+	getHubIdxOrFail(t)
+	// a scenario referenced by the collection was disabled, hence the collection should be tainted
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Downloaded) // this fails
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Tainted)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Downloaded)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].Installed)
+	require.True(t, hubIdx[COLLECTIONS]["crowdsecurity/test_collection"].UpToDate)
+
+	// the collection receives an update that adds the new scenario "crowdsecurity/barfoo_scenario".
+	// we now upgrade the collection: it must not reinstall the foobar_scenario
+	// we just removed, but it must install the newly added scenario.
+	pushUpdateToCollectionInHub()
+
+	if err := UpdateHubIdx(cfg.Hub); err != nil {
+		t.Fatalf("failed to download index : %s", err)
+	}
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	getHubIdxOrFail(t)
+
+	UpgradeConfig(cfg, COLLECTIONS, "crowdsecurity/test_collection", false)
+	getHubIdxOrFail(t)
+	require.False(t, hubIdx[SCENARIOS]["crowdsecurity/foobar_scenario"].Installed)
+	require.True(t, hubIdx[SCENARIOS]["crowdsecurity/barfoo_scenario"].Installed)
+}
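+
+// The mocked hub used by these tests is simply a map from URL path to
+// response body; re-pointing a path at another fixture is how a hub-side
+// release is simulated (see pushUpdateToCollectionInHub below). As an
+// illustration, the inverse operation, rolling the collection back to the v1
+// fixtures shipped in ./tests, would look like:
+func rollbackCollectionInHub() {
+	responseByPath["/master/.index.json"] = fileToStringX("./tests/index1.json")
+	responseByPath["/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./tests/collection_v1.yaml")
+}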
Please delete manually", target.Name) + } + + if target.Tainted && !force { + return target, fmt.Errorf("%s is tainted, use '--force' to overwrite", target.Name) + } + + /*for a COLLECTIONS, disable sub-items*/ + if target.Type == COLLECTIONS { + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if val, ok := hubIdx[ptrtype][p]; ok { + // check if the item doesn't belong to another collection before removing it + toRemove := true + for _, collection := range val.BelongsToCollections { + if collection != target.Name { + toRemove = false + break + } + } + if toRemove { + hubIdx[ptrtype][p], err = DisableItem(hub, val, purge, force) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while disabling %s", p)) + } + } else { + log.Infof("%s was not removed because it belongs to another collection", val.Name) + } + } else { + log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, target.Name) + } + } + } + } + + stat, err := os.Lstat(syml) + if os.IsNotExist(err) { + if !purge && !force { //we only accept to "delete" non existing items if it's a purge + return target, fmt.Errorf("can't delete %s : %s doesn't exist", target.Name, syml) + } + } else { + //if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... + if stat.Mode()&os.ModeSymlink == 0 { + log.Warningf("%s (%s) isn't a symlink, can't disable", target.Name, syml) + return target, fmt.Errorf("%s isn't managed by hub", target.Name) + } + hubpath, err := os.Readlink(syml) + if err != nil { + return target, errors.Wrap(err, "while reading symlink") + } + absPath, err := filepath.Abs(hdir + "/" + target.RemotePath) + if err != nil { + return target, errors.Wrap(err, "while abs path") + } + if hubpath != absPath { + log.Warningf("%s (%s) isn't a symlink to %s", target.Name, syml, absPath) + return target, fmt.Errorf("%s isn't managed by hub", target.Name) + } + + //remove the symlink + if err = os.Remove(syml); err != nil { + return target, errors.Wrap(err, "while removing symlink") + } + log.Infof("Removed symlink [%s] : %s", target.Name, syml) + } + target.Installed = false + + if purge { + target, err = purgeItem(hub, target) + if err != nil { + return target, err + } + } + hubIdx[target.Type][target.Name] = target + return target, nil +} + +// creates symlink between actual config file at hub.HubDir and hub.ConfigDir +// Handles collections recursively +func EnableItem(hub *csconfig.Hub, target Item) (Item, error) { + var tdir = hub.ConfigDir + var hdir = hub.HubDir + var err error + parent_dir := filepath.Clean(tdir + "/" + target.Type + "/" + target.Stage + "/") + /*create directories if needed*/ + if target.Installed { + if target.Tainted { + return target, fmt.Errorf("%s is tainted, won't enable unless --force", target.Name) + } + if target.Local { + return target, fmt.Errorf("%s is local, won't enable", target.Name) + } + /* if it's a collection, check sub-items even if the collection file itself is up-to-date */ + if target.UpToDate && target.Type != COLLECTIONS { + log.Tracef("%s is installed and up-to-date, skip.", target.Name) + return target, nil + } + } + if _, err := os.Stat(parent_dir); os.IsNotExist(err) { + log.Printf("%s doesn't exist, create", parent_dir) + if err := os.MkdirAll(parent_dir, os.ModePerm); err != nil { + return target, errors.Wrap(err, "while creating directory") + } + } + + /*install sub-items if it's a collection*/ + if 
target.Type == COLLECTIONS { + var tmp = [][]string{target.Parsers, target.PostOverflows, target.Scenarios, target.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + val, ok := hubIdx[ptrtype][p] + if !ok { + return target, fmt.Errorf("required %s %s of %s doesn't exist, abort.", ptrtype, p, target.Name) + } + + hubIdx[ptrtype][p], err = EnableItem(hub, val) + if err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while installing %s", p)) + } + } + } + } + + // check if file already exists where it should in configdir (eg /etc/crowdsec/collections/) + if _, err := os.Lstat(parent_dir + "/" + target.FileName); !os.IsNotExist(err) { + log.Printf("%s already exists.", parent_dir+"/"+target.FileName) + return target, nil + } + + //tdir+target.RemotePath + srcPath, err := filepath.Abs(hdir + "/" + target.RemotePath) + if err != nil { + return target, errors.Wrap(err, "while getting source path") + } + + dstPath, err := filepath.Abs(parent_dir + "/" + target.FileName) + if err != nil { + return target, errors.Wrap(err, "while getting destination path") + } + + if err = os.Symlink(srcPath, dstPath); err != nil { + return target, errors.Wrap(err, fmt.Sprintf("while creating symlink from %s to %s", srcPath, dstPath)) + } + + log.Printf("Enabled %s : %s", target.Type, target.Name) + target.Installed = true + hubIdx[target.Type][target.Name] = target + return target, nil +} diff --git a/pkg/cwhub/loader.go b/pkg/cwhub/loader.go new file mode 100644 index 0000000..496ef42 --- /dev/null +++ b/pkg/cwhub/loader.go @@ -0,0 +1,424 @@ +package cwhub + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "golang.org/x/mod/semver" +) + +/*the walk/parser_visit function can't receive extra args*/ +var hubdir, installdir string + +func parser_visit(path string, f os.DirEntry, err error) error { + + var target Item + var local bool + var hubpath string + var inhub bool + var fname string + var ftype string + var fauthor string + var stage string + + if err != nil { + log.Debugf("while syncing hub dir: %s", err) + // there is a path error, we ignore the file + return nil + } + + path, err = filepath.Abs(path) + if err != nil { + return err + } + //we only care about files + if f == nil || f.IsDir() { + return nil + } + //we only care about yaml files + if !strings.HasSuffix(f.Name(), ".yaml") && !strings.HasSuffix(f.Name(), ".yml") { + return nil + } + + subs := strings.Split(path, string(os.PathSeparator)) + + log.Tracef("path:%s, hubdir:%s, installdir:%s", path, hubdir, installdir) + log.Tracef("subs:%v", subs) + /*we're in hub (~/.hub/hub/)*/ + if strings.HasPrefix(path, hubdir) { + log.Tracef("in hub dir") + inhub = true + //.../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml + //.../hub/scenarios/crowdsec/ssh_bf.yaml + //.../hub/profiles/crowdsec/linux.yaml + if len(subs) < 4 { + log.Fatalf("path is too short : %s (%d)", path, len(subs)) + } + fname = subs[len(subs)-1] + fauthor = subs[len(subs)-2] + stage = subs[len(subs)-3] + ftype = subs[len(subs)-4] + } else if strings.HasPrefix(path, installdir) { /*we're in install /etc/crowdsec//... 
*/ + log.Tracef("in install dir") + if len(subs) < 3 { + log.Fatalf("path is too short : %s (%d)", path, len(subs)) + } + ///.../config/parser/stage/file.yaml + ///.../config/postoverflow/stage/file.yaml + ///.../config/scenarios/scenar.yaml + ///.../config/collections/linux.yaml //file is empty + fname = subs[len(subs)-1] + stage = subs[len(subs)-2] + ftype = subs[len(subs)-3] + fauthor = "" + } else { + return fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubdir, installdir) + } + + log.Tracef("stage:%s ftype:%s", stage, ftype) + //log.Printf("%s -> name:%s stage:%s", path, fname, stage) + if stage == SCENARIOS { + ftype = SCENARIOS + stage = "" + } else if stage == COLLECTIONS { + ftype = COLLECTIONS + stage = "" + } else if ftype != PARSERS && ftype != PARSERS_OVFLW /*its a PARSER / PARSER_OVFLW with a stage */ { + return fmt.Errorf("unknown configuration type for file '%s'", path) + } + + log.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", fname, fauthor, stage, ftype) + + /* + we can encounter 'collections' in the form of a symlink : + /etc/crowdsec/.../collections/linux.yaml -> ~/.hub/hub/collections/.../linux.yaml + when the collection is installed, both files are created + */ + //non symlinks are local user files or hub files + if f.Type() & os.ModeSymlink == 0 { + local = true + log.Tracef("%s isn't a symlink", path) + } else { + hubpath, err = os.Readlink(path) + if err != nil { + return fmt.Errorf("unable to read symlink of %s", path) + } + //the symlink target doesn't exist, user might have removed ~/.hub/hub/...yaml without deleting /etc/crowdsec/....yaml + _, err := os.Lstat(hubpath) + if os.IsNotExist(err) { + log.Infof("%s is a symlink to %s that doesn't exist, deleting symlink", path, hubpath) + //remove the symlink + if err = os.Remove(path); err != nil { + return fmt.Errorf("failed to unlink %s: %+v", path, err) + } + return nil + } + log.Tracef("%s points to %s", path, hubpath) + } + + //if it's not a symlink and not in hub, it's a local file, don't bother + if local && !inhub { + log.Tracef("%s is a local file, skip", path) + skippedLocal++ + // log.Printf("local scenario, skip.") + target.Name = fname + target.Stage = stage + target.Installed = true + target.Type = ftype + target.Local = true + target.LocalPath = path + target.UpToDate = true + _, target.FileName = filepath.Split(path) + + hubIdx[ftype][fname] = target + return nil + } + //try to find which configuration item it is + log.Tracef("check [%s] of %s", fname, ftype) + + match := false + for k, v := range hubIdx[ftype] { + log.Tracef("check [%s] vs [%s] : %s", fname, v.RemotePath, ftype+"/"+stage+"/"+fname+".yaml") + if fname != v.FileName { + log.Tracef("%s != %s (filename)", fname, v.FileName) + continue + } + //wrong stage + if v.Stage != stage { + continue + } + /*if we are walking hub dir, just mark present files as downloaded*/ + if inhub { + //wrong author + if fauthor != v.Author { + continue + } + //wrong file + if CheckName(v.Name, fauthor, fname) { + continue + } + + if path == hubdir+"/"+v.RemotePath { + log.Tracef("marking %s as downloaded", v.Name) + v.Downloaded = true + } + } else if CheckSuffix(hubpath, v.RemotePath) { + //wrong file + /////.yaml + continue + } + sha, err := getSHA256(path) + if err != nil { + log.Fatalf("Failed to get sha of %s : %v", path, err) + } + //let's reverse sort the versions to deal with hash collisions (#154) + versions := make([]string, 0, len(v.Versions)) + for k := range v.Versions { + versions = 
append(versions, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(versions))) + + for _, version := range versions { + val := v.Versions[version] + if sha != val.Digest { + //log.Printf("matching filenames, wrong hash %s != %s -- %s", sha, val.Digest, spew.Sdump(v)) + continue + } + /*we got an exact match, update struct*/ + if !inhub { + log.Tracef("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version) + v.LocalPath = path + v.LocalVersion = version + v.Tainted = false + v.Downloaded = true + /*if we're walking the hub, present file doesn't means installed file*/ + v.Installed = true + v.LocalHash = sha + _, target.FileName = filepath.Split(path) + } else { + v.Downloaded = true + v.LocalHash = sha + } + if version == v.Version { + log.Tracef("%s is up-to-date", v.Name) + v.UpToDate = true + } + match = true + break + } + if !match { + log.Tracef("got tainted match for %s : %s", v.Name, path) + skippedTainted += 1 + //the file and the stage is right, but the hash is wrong, it has been tainted by user + if !inhub { + v.LocalPath = path + v.Installed = true + } + v.UpToDate = false + v.LocalVersion = "?" + v.Tainted = true + v.LocalHash = sha + _, target.FileName = filepath.Split(path) + + } + //update the entry if appropriate + // if _, ok := hubIdx[ftype][k]; !ok || !inhub || v.D { + // fmt.Printf("Updating %s", k) + // hubIdx[ftype][k] = v + // } else if !inhub { + + // } else if + hubIdx[ftype][k] = v + return nil + } + log.Infof("Ignoring file %s of type %s", path, ftype) + return nil +} + +func CollecDepsCheck(v *Item) error { + + if GetVersionStatus(v) != 0 { //not up-to-date + log.Debugf("%s dependencies not checked : not up-to-date", v.Name) + return nil + } + + /*if it's a collection, ensure all the items are installed, or tag it as tainted*/ + if v.Type == COLLECTIONS { + log.Tracef("checking submembers of %s installed:%t", v.Name, v.Installed) + var tmp = [][]string{v.Parsers, v.PostOverflows, v.Scenarios, v.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + val, ok := hubIdx[ptrtype][p] + if !ok { + log.Fatalf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, v.Name) + } + log.Tracef("check %s installed:%t", val.Name, val.Installed) + if !v.Installed { + continue + } + if val.Type == COLLECTIONS { + log.Tracef("collec, recurse.") + if err := CollecDepsCheck(&val); err != nil { + return fmt.Errorf("sub collection %s is broken : %s", val.Name, err) + } + hubIdx[ptrtype][p] = val + } + + //propagate the state of sub-items to set + if val.Tainted { + v.Tainted = true + return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p) + } + if !val.Installed && v.Installed { + v.Tainted = true + return fmt.Errorf("missing %s %s, tainted.", ptrtype, p) + } + if !val.UpToDate { + v.UpToDate = false + return fmt.Errorf("outdated %s %s", ptrtype, p) + } + skip := false + for idx := range val.BelongsToCollections { + if val.BelongsToCollections[idx] == v.Name { + skip = true + } + } + if !skip { + val.BelongsToCollections = append(val.BelongsToCollections, v.Name) + } + hubIdx[ptrtype][p] = val + log.Tracef("checking for %s - tainted:%t uptodate:%t", p, v.Tainted, v.UpToDate) + } + } + } + return nil +} + +func SyncDir(hub *csconfig.Hub, dir string) (error, []string) { + hubdir = hub.HubDir + installdir = hub.ConfigDir + warnings := []string{} + + /*For each, scan PARSERS, PARSERS_OVFLW, SCENARIOS and COLLECTIONS last*/ + for _, scan := range ItemTypes { + cpath, err := filepath.Abs(fmt.Sprintf("%s/%s", dir, 
scan)) + if err != nil { + log.Errorf("failed %s : %s", cpath, err) + } + err = filepath.WalkDir(cpath, parser_visit) + if err != nil { + return err, warnings + } + + } + + for k, v := range hubIdx[COLLECTIONS] { + if v.Installed { + versStat := GetVersionStatus(&v) + if versStat == 0 { //latest + if err := CollecDepsCheck(&v); err != nil { + warnings = append(warnings, fmt.Sprintf("dependency of %s : %s", v.Name, err)) + hubIdx[COLLECTIONS][k] = v + } + } else if versStat == 1 { //not up-to-date + warnings = append(warnings, fmt.Sprintf("update for collection %s available (currently:%s, latest:%s)", v.Name, v.LocalVersion, v.Version)) + } else { //version is higher than the highest available from hub? + warnings = append(warnings, fmt.Sprintf("collection %s is in the future (currently:%s, latest:%s)", v.Name, v.LocalVersion, v.Version)) + } + log.Debugf("installed (%s) - status:%d | installed:%s | latest : %s | full : %+v", v.Name, semver.Compare("v"+v.Version, "v"+v.LocalVersion), v.LocalVersion, v.Version, v.Versions) + } + } + return nil, warnings +} + +/* Updates the infos from HubInit() with the local state */ +func LocalSync(hub *csconfig.Hub) (error, []string) { + skippedLocal = 0 + skippedTainted = 0 + + err, warnings := SyncDir(hub, hub.ConfigDir) + if err != nil { + return fmt.Errorf("failed to scan %s : %s", hub.ConfigDir, err), warnings + } + err, _ = SyncDir(hub, hub.HubDir) + if err != nil { + return fmt.Errorf("failed to scan %s : %s", hub.HubDir, err), warnings + } + return nil, warnings +} + +func GetHubIdx(hub *csconfig.Hub) error { + if hub == nil { + return fmt.Errorf("no configuration found for hub") + } + log.Debugf("loading hub idx %s", hub.HubIndexFile) + bidx, err := os.ReadFile(hub.HubIndexFile) + if err != nil { + return errors.Wrap(err, "unable to read index file") + } + ret, err := LoadPkgIndex(bidx) + if err != nil { + if !errors.Is(err, ReferenceMissingError) { + log.Fatalf("Unable to load existing index : %v.", err) + } + return err + } + hubIdx = ret + err, _ = LocalSync(hub) + if err != nil { + log.Fatalf("Failed to sync Hub index with local deployment : %v", err) + } + return nil +} + +/*LoadPkgIndex loads a local .index.json file and returns the map of parsers/scenarios/collections associated*/ +func LoadPkgIndex(buff []byte) (map[string]map[string]Item, error) { + var err error + var RawIndex map[string]map[string]Item + var missingItems []string + + if err = json.Unmarshal(buff, &RawIndex); err != nil { + return nil, fmt.Errorf("failed to unmarshal index : %v", err) + } + + log.Debugf("%d item types in hub index", len(ItemTypes)) + /*Iterate over the different types to complete struct */ + for _, itemType := range ItemTypes { + /*complete struct*/ + log.Tracef("%d item", len(RawIndex[itemType])) + for idx, item := range RawIndex[itemType] { + item.Name = idx + item.Type = itemType + x := strings.Split(item.RemotePath, "/") + item.FileName = x[len(x)-1] + RawIndex[itemType][idx] = item + /*if it's a collection, check its sub-items are present*/ + //XX should be done later + if itemType == COLLECTIONS { + var tmp = [][]string{item.Parsers, item.PostOverflows, item.Scenarios, item.Collections} + for idx, ptr := range tmp { + ptrtype := ItemTypes[idx] + for _, p := range ptr { + if _, ok := RawIndex[ptrtype][p]; !ok { + log.Errorf("Referred %s %s in collection %s doesn't exist.", ptrtype, p, item.Name) + missingItems = append(missingItems, p) + } + } + } + } + } + } + if len(missingItems) > 0 { + return RawIndex, fmt.Errorf("%q : %w", missingItems, 
ReferenceMissingError) + } + + return RawIndex, nil +} diff --git a/pkg/cwhub/path_separator_windows.go b/pkg/cwhub/path_separator_windows.go new file mode 100644 index 0000000..42f61aa --- /dev/null +++ b/pkg/cwhub/path_separator_windows.go @@ -0,0 +1,23 @@ +package cwhub + +import ( + "path/filepath" + "strings" +) + +func CheckSuffix(hubpath string, remotePath string) bool { + newPath := filepath.ToSlash(hubpath) + if !strings.HasSuffix(newPath, remotePath) { + return true + } else { + return false + } +} + +func CheckName(vname string, fauthor string, fname string) bool { + if vname+".yaml" != fauthor+"/"+fname && vname+".yml" != fauthor+"/"+fname { + return true + } else { + return false + } +} diff --git a/pkg/cwhub/pathseparator.go b/pkg/cwhub/pathseparator.go new file mode 100644 index 0000000..0340697 --- /dev/null +++ b/pkg/cwhub/pathseparator.go @@ -0,0 +1,24 @@ +//go:build linux || freebsd || netbsd || openbsd || solaris || !windows +// +build linux freebsd netbsd openbsd solaris !windows + +package cwhub + +import "strings" + +const PathSeparator = "/" + +func CheckSuffix(hubpath string, remotePath string) bool { + if !strings.HasSuffix(hubpath, remotePath) { + return true + } else { + return false + } +} + +func CheckName(vname string, fauthor string, fname string) bool { + if vname+".yaml" != fauthor+"/"+fname && vname+".yml" != fauthor+"/"+fname { + return true + } else { + return false + } +} diff --git a/pkg/cwhub/tests/collection_v1.yaml b/pkg/cwhub/tests/collection_v1.yaml new file mode 100644 index 0000000..a72cf1c --- /dev/null +++ b/pkg/cwhub/tests/collection_v1.yaml @@ -0,0 +1,2 @@ +scenarios: + - crowdsecurity/foobar_scenario \ No newline at end of file diff --git a/pkg/cwhub/tests/collection_v2.yaml b/pkg/cwhub/tests/collection_v2.yaml new file mode 100644 index 0000000..7a16c4f --- /dev/null +++ b/pkg/cwhub/tests/collection_v2.yaml @@ -0,0 +1,3 @@ +scenarios: + - crowdsecurity/foobar_scenario + - crowdsecurity/barfoo_scenario \ No newline at end of file diff --git a/pkg/cwhub/tests/foobar_parser.yaml b/pkg/cwhub/tests/foobar_parser.yaml new file mode 100644 index 0000000..3b75274 --- /dev/null +++ b/pkg/cwhub/tests/foobar_parser.yaml @@ -0,0 +1,8 @@ +onsuccess: next_stage +filter: evt.Parsed.program == 'foobar_parser' +name: crowdsecurity/foobar_parser +#debug: true +description: A parser for foobar_parser WAF +grok: + name: foobar_parser + apply_on: message \ No newline at end of file diff --git a/pkg/cwhub/tests/index1.json b/pkg/cwhub/tests/index1.json new file mode 100644 index 0000000..a7e6ef6 --- /dev/null +++ b/pkg/cwhub/tests/index1.json @@ -0,0 +1,121 @@ +{ + "collections": { + "crowdsecurity/foobar": { + "path": "collections/crowdsecurity/foobar.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "786c9490e4dd234453e53aa9bb7d28c60668e31c3c0c71a7dd6d0abbfa60261a", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "foobar collection : foobar", + "author": "crowdsecurity", + "labels": null, + "collections": [ + "crowdsecurity/foobar_subcollection" + ], + "parsers": [ + "crowdsecurity/foobar_parser" + ], + "scenarios": [ + "crowdsecurity/foobar_scenario" + ] + }, + "crowdsecurity/test_collection": { + "path": "collections/crowdsecurity/test_collection.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "517d0f0764ab6eee9d00d31f50da2a6cdd2084232fea32a7cb9f1fe95e658f59", + "deprecated": false + } + }, + "long_description": 
"bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "test_collection : foobar", + "author": "crowdsecurity", + "labels": null, + "scenarios": [ + "crowdsecurity/foobar_scenario" + ] + }, + "crowdsecurity/foobar_subcollection": { + "path": "collections/crowdsecurity/foobar_subcollection.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "786c9490e4dd234453e53aa9bb7d28c60668e31c3c0c71a7dd6d0abbfa60261a", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "foobar collection : foobar", + "author": "crowdsecurity", + "labels": null, + "parsers": [ + "crowdsecurity/foobar_subparser" + ] + } + }, + "parsers": { + "crowdsecurity/foobar_parser": { + "path": "parsers/s01-parse/crowdsecurity/foobar_parser.yaml", + "stage": "s01-parse", + "version": "0.1", + "versions": { + "0.1": { + "digest": "932973ba9ba99c98dbb27c207d4b3de36c9510d87cde82598b7f4b398cbdde83", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "A foobar parser", + "author": "crowdsecurity", + "labels": null + }, + "crowdsecurity/foobar_subparser": { + "path": "parsers/s01-parse/crowdsecurity/foobar_subparser.yaml", + "stage": "s01-parse", + "version": "0.1", + "versions": { + "0.1": { + "digest": "932973ba9ba99c98dbb27c207d4b3de36c9510d87cde82598b7f4b398cbdde83", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "A foobar parser", + "author": "crowdsecurity", + "labels": null + } + }, + "postoverflows": {}, + "scenarios": { + "crowdsecurity/foobar_scenario": { + "path": "scenarios/crowdsecurity/foobar_scenario.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "a76b389db944ca7a9e5a3f3ae61ee2d4ee98167164ec9b971174b1d44f5a01c6", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "a foobar scenario", + "author": "crowdsecurity", + "labels": { + "remediation": "true", + "scope": "ip", + "service": "http", + "type": "web_attack" + } + } + } +} \ No newline at end of file diff --git a/pkg/cwhub/tests/index2.json b/pkg/cwhub/tests/index2.json new file mode 100644 index 0000000..7f97ebf --- /dev/null +++ b/pkg/cwhub/tests/index2.json @@ -0,0 +1,146 @@ +{ + "collections": { + "crowdsecurity/foobar": { + "path": "collections/crowdsecurity/foobar.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "786c9490e4dd234453e53aa9bb7d28c60668e31c3c0c71a7dd6d0abbfa60261a", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "foobar collection : foobar", + "author": "crowdsecurity", + "labels": null, + "collections": [ + "crowdsecurity/foobar_subcollection" + ], + "parsers": [ + "crowdsecurity/foobar_parser" + ], + "scenarios": [ + "crowdsecurity/foobar_scenario" + ] + }, + "crowdsecurity/test_collection": { + "path": "collections/crowdsecurity/test_collection.yaml", + "version": "0.2", + "versions": { + "0.1": { + "digest": "517d0f0764ab6eee9d00d31f50da2a6cdd2084232fea32a7cb9f1fe95e658f59", + "deprecated": false + }, + "0.2": { + "digest": "045fbe3f4c4b60fb7c12e486138e901fe6008b5bada6b9f6199cb4760d8bf448", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": 
"test_collection : foobar", + "author": "crowdsecurity", + "labels": null, + "scenarios": [ + "crowdsecurity/foobar_scenario", + "crowdsecurity/barfoo_scenario" + ] + }, + "crowdsecurity/foobar_subcollection": { + "path": "collections/crowdsecurity/foobar_subcollection.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "786c9490e4dd234453e53aa9bb7d28c60668e31c3c0c71a7dd6d0abbfa60261a", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "foobar collection : foobar", + "author": "crowdsecurity", + "labels": null, + "parsers": [ + "crowdsecurity/foobar_subparser" + ] + } + }, + "parsers": { + "crowdsecurity/foobar_parser": { + "path": "parsers/s01-parse/crowdsecurity/foobar_parser.yaml", + "stage": "s01-parse", + "version": "0.1", + "versions": { + "0.1": { + "digest": "932973ba9ba99c98dbb27c207d4b3de36c9510d87cde82598b7f4b398cbdde83", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "A foobar parser", + "author": "crowdsecurity", + "labels": null + }, + "crowdsecurity/foobar_subparser": { + "path": "parsers/s01-parse/crowdsecurity/foobar_subparser.yaml", + "stage": "s01-parse", + "version": "0.1", + "versions": { + "0.1": { + "digest": "932973ba9ba99c98dbb27c207d4b3de36c9510d87cde82598b7f4b398cbdde83", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "A foobar parser", + "author": "crowdsecurity", + "labels": null + } + }, + "postoverflows": {}, + "scenarios": { + "crowdsecurity/foobar_scenario": { + "path": "scenarios/crowdsecurity/foobar_scenario.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "a76b389db944ca7a9e5a3f3ae61ee2d4ee98167164ec9b971174b1d44f5a01c6", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "a foobar scenario", + "author": "crowdsecurity", + "labels": { + "remediation": "true", + "scope": "ip", + "service": "http", + "type": "web_attack" + } + }, + "crowdsecurity/barfoo_scenario": { + "path": "scenarios/crowdsecurity/barfoo_scenario.yaml", + "version": "0.1", + "versions": { + "0.1": { + "digest": "a76b389db944ca7a9e5a3f3ae61ee2d4ee98167164ec9b971174b1d44f5a01c6", + "deprecated": false + } + }, + "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", + "content": "bG9uZyBkZXNjcmlwdGlvbgo=", + "description": "a foobar scenario", + "author": "crowdsecurity", + "labels": { + "remediation": "true", + "scope": "ip", + "service": "http", + "type": "web_attack" + } + } + } +} \ No newline at end of file diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go new file mode 100644 index 0000000..4470b91 --- /dev/null +++ b/pkg/cwversion/version.go @@ -0,0 +1,104 @@ +package cwversion + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "runtime" + "strings" + + version "github.com/hashicorp/go-version" +) + +/* + +Given a version number MAJOR.MINOR.PATCH, increment the: + + MAJOR version when you make incompatible API changes, + MINOR version when you add functionality in a backwards compatible manner, and + PATCH version when you make backwards compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. 
+ +*/ + +var ( + Version string // = "v0.0.0" + Codename string // = "SoumSoum" + BuildDate string // = "0000-00-00_00:00:00" + Tag string // = "dev" + GoVersion = runtime.Version()[2:] // = "1.13" + System = runtime.GOOS // = "linux" + Constraint_parser = ">= 1.0, <= 2.0" + Constraint_scenario = ">= 1.0, < 3.0" + Constraint_api = "v1" + Constraint_acquis = ">= 1.0, < 2.0" +) + +func ShowStr() string { + ret := "" + ret += fmt.Sprintf("version: %s-%s\n", Version, Tag) + ret += fmt.Sprintf("Codename: %s\n", Codename) + ret += fmt.Sprintf("BuildDate: %s\n", BuildDate) + ret += fmt.Sprintf("GoVersion: %s\n", GoVersion) + ret += fmt.Sprintf("Platform: %s\n", System) + return ret +} + +func Show() { + log.Printf("version: %s-%s", Version, Tag) + log.Printf("Codename: %s", Codename) + log.Printf("BuildDate: %s", BuildDate) + log.Printf("GoVersion: %s", GoVersion) + log.Printf("Platform: %s\n", System) + log.Printf("Constraint_parser: %s", Constraint_parser) + log.Printf("Constraint_scenario: %s", Constraint_scenario) + log.Printf("Constraint_api: %s", Constraint_api) + log.Printf("Constraint_acquis: %s", Constraint_acquis) +} + +func VersionStr() string { + return fmt.Sprintf("%s-%s-%s", Version, System, Tag) +} + +func VersionStrip() string { + version := strings.Split(Version, "-") + return version[0] +} + +func Statisfies(strvers string, constraint string) (bool, error) { + vers, err := version.NewVersion(strvers) + if err != nil { + return false, fmt.Errorf("failed to parse '%s' : %v", strvers, err) + } + constraints, err := version.NewConstraint(constraint) + if err != nil { + return false, fmt.Errorf("failed to parse constraint '%s'", constraint) + } + if !constraints.Check(vers) { + return false, nil + } + return true, nil +} + +// Latest return latest crowdsec version based on github +func Latest() (string, error) { + latest := make(map[string]interface{}) + + resp, err := http.Get("https://version.crowdsec.net/latest") + if err != nil { + return "", err + } + defer resp.Body.Close() + + err = json.NewDecoder(resp.Body).Decode(&latest) + if err != nil { + return "", err + } + if _, ok := latest["name"]; !ok { + return "", fmt.Errorf("unable to find latest release name from github api: %+v", latest) + } + + return latest["name"].(string), nil +} diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go new file mode 100644 index 0000000..1be53d5 --- /dev/null +++ b/pkg/database/alerts.go @@ -0,0 +1,1144 @@ +package database + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +const ( + paginationSize = 100 // used to queryAlert to avoid 'too many SQL variable' + defaultLimit = 100 // default limit of element to returns when query alerts + bulkSize = 50 // bulk size when create alerts + decisionBulkSize = 50 +) + +func 
formatAlertAsString(machineId string, alert *models.Alert) []string {
+	var retStr []string
+
+	/**/
+	src := ""
+	if alert.Source != nil {
+		if *alert.Source.Scope == types.Ip {
+			src = fmt.Sprintf("ip %s", *alert.Source.Value)
+			if alert.Source.Cn != "" {
+				src += " (" + alert.Source.Cn
+				if alert.Source.AsNumber != "" {
+					src += "/" + alert.Source.AsNumber
+				}
+				src += ")"
+			}
+		} else if *alert.Source.Scope == types.Range {
+			src = fmt.Sprintf("range %s", *alert.Source.Value)
+			if alert.Source.Cn != "" {
+				src += " (" + alert.Source.Cn
+				if alert.Source.AsNumber != "" {
+					src += "/" + alert.Source.AsNumber
+				}
+				src += ")"
+			}
+		} else {
+			src = fmt.Sprintf("%s %s", *alert.Source.Scope, *alert.Source.Value)
+		}
+	} else {
+		src = "empty source"
+	}
+
+	/**/
+	reason := ""
+	if *alert.Scenario != "" {
+		reason = fmt.Sprintf("%s by %s", *alert.Scenario, src)
+	} else if *alert.Message != "" {
+		reason = fmt.Sprintf("%s by %s", *alert.Message, src)
+	} else {
+		reason = fmt.Sprintf("empty scenario by %s", src)
+	}
+
+	if len(alert.Decisions) > 0 {
+		for _, decisionItem := range alert.Decisions {
+			decision := ""
+			if alert.Simulated != nil && *alert.Simulated {
+				decision = "(simulated alert)"
+			} else if decisionItem.Simulated != nil && *decisionItem.Simulated {
+				decision = "(simulated decision)"
+			}
+			if log.GetLevel() >= log.DebugLevel {
+				/*spew is expensive*/
+				log.Debugf("%s", spew.Sdump(decisionItem))
+			}
+			decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration,
+				*decisionItem.Type, *decisionItem.Scope, *decisionItem.Value)
+			retStr = append(retStr,
+				fmt.Sprintf("(%s/%s) %s : %s", machineId,
+					*decisionItem.Origin, reason, decision))
+		}
+	} else {
+		retStr = append(retStr, fmt.Sprintf("(%s) alert : %s", machineId, reason))
+	}
+	return retStr
+}
+
+func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) {
+	pageStart := 0
+	pageEnd := bulkSize
+	ret := []string{}
+	for {
+		if pageEnd >= len(alertList) {
+			results, err := c.CreateAlertBulk(machineID, alertList[pageStart:])
+			if err != nil {
+				return []string{}, fmt.Errorf("unable to create alerts: %s", err)
+			}
+			ret = append(ret, results...)
+			break
+		}
+		results, err := c.CreateAlertBulk(machineID, alertList[pageStart:pageEnd])
+		if err != nil {
+			return []string{}, fmt.Errorf("unable to create alerts: %s", err)
+		}
+		ret = append(ret, results...)
+		pageStart += bulkSize
+		pageEnd += bulkSize
+	}
+	return ret, nil
+}
+
+/*We can't bulk both the alert and the decision at the same time. 
With new consensus, we want to bulk a single alert with a lot of decisions.*/ +func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { + + var err error + var deleted, inserted int + + if alertItem == nil { + return 0, 0, 0, fmt.Errorf("nil alert") + } + if alertItem.StartAt == nil { + return 0, 0, 0, fmt.Errorf("nil start_at") + } + startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) + if err != nil { + return 0, 0, 0, errors.Wrapf(ParseTimeFail, "start_at field time '%s': %s", *alertItem.StartAt, err) + } + if alertItem.StopAt == nil { + return 0, 0, 0, fmt.Errorf("nil stop_at") + } + stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) + if err != nil { + return 0, 0, 0, errors.Wrapf(ParseTimeFail, "stop_at field time '%s': %s", *alertItem.StopAt, err) + } + + ts, err := time.Parse(time.RFC3339, *alertItem.StopAt) + if err != nil { + c.Log.Errorf("While parsing StartAt of item %s : %s", *alertItem.StopAt, err) + ts = time.Now().UTC() + } + + alertB := c.Ent.Alert. + Create(). + SetScenario(*alertItem.Scenario). + SetMessage(*alertItem.Message). + SetEventsCount(*alertItem.EventsCount). + SetStartedAt(startAtTime). + SetStoppedAt(stopAtTime). + SetSourceScope(*alertItem.Source.Scope). + SetSourceValue(*alertItem.Source.Value). + SetSourceIp(alertItem.Source.IP). + SetSourceRange(alertItem.Source.Range). + SetSourceAsNumber(alertItem.Source.AsNumber). + SetSourceAsName(alertItem.Source.AsName). + SetSourceCountry(alertItem.Source.Cn). + SetSourceLatitude(alertItem.Source.Latitude). + SetSourceLongitude(alertItem.Source.Longitude). + SetCapacity(*alertItem.Capacity). + SetLeakSpeed(*alertItem.Leakspeed). + SetSimulated(*alertItem.Simulated). + SetScenarioVersion(*alertItem.ScenarioVersion). 
+ SetScenarioHash(*alertItem.ScenarioHash) + + alertRef, err := alertB.Save(c.CTX) + if err != nil { + return 0, 0, 0, errors.Wrapf(BulkError, "error creating alert : %s", err) + } + + if len(alertItem.Decisions) > 0 { + txClient, err := c.Ent.Tx(c.CTX) + if err != nil { + return 0, 0, 0, errors.Wrapf(BulkError, "error creating transaction : %s", err) + } + decisionBulk := make([]*ent.DecisionCreate, 0, decisionBulkSize) + valueList := make([]string, 0, decisionBulkSize) + DecOrigin := CapiMachineID + if *alertItem.Decisions[0].Origin == CapiMachineID || *alertItem.Decisions[0].Origin == CapiListsMachineID { + DecOrigin = *alertItem.Decisions[0].Origin + } else { + log.Warningf("unexpected origin %s", *alertItem.Decisions[0].Origin) + } + for i, decisionItem := range alertItem.Decisions { + var start_ip, start_sfx, end_ip, end_sfx int64 + var sz int + if decisionItem.Duration == nil { + log.Warning("nil duration in community decision") + continue + } + duration, err := time.ParseDuration(*decisionItem.Duration) + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) + } + if decisionItem.Scope == nil { + log.Warning("nil scope in community decision") + continue + } + /*if the scope is IP or Range, convert the value to integers */ + if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" { + sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value) + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrapf(ParseDurationFail, "invalid addr/range %s : %s", *decisionItem.Value, err) + } + } + /*bulk insert some new decisions*/ + decisionBulk = append(decisionBulk, c.Ent.Decision.Create(). + SetUntil(ts.Add(duration)). + SetScenario(*decisionItem.Scenario). + SetType(*decisionItem.Type). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(sz)). + SetValue(*decisionItem.Value). + SetScope(*decisionItem.Scope). + SetOrigin(*decisionItem.Origin). + SetSimulated(*alertItem.Simulated). + SetOwner(alertRef)) + + /*for bulk delete of duplicate decisions*/ + if decisionItem.Value == nil { + log.Warning("nil value in community decision") + continue + } + valueList = append(valueList, *decisionItem.Value) + + if len(decisionBulk) == decisionBulkSize { + + insertedDecisions, err := txClient.Decision.CreateBulk(decisionBulk...).Save(c.CTX) + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrapf(BulkError, "bulk creating decisions : %s", err) + } + inserted += len(insertedDecisions) + + /*Deleting older decisions from capi*/ + deletedDecisions, err := txClient.Decision.Delete(). 
+ Where(decision.And( + decision.OriginEQ(DecOrigin), + decision.Not(decision.HasOwnerWith(alert.IDEQ(alertRef.ID))), + decision.ValueIn(valueList...), + )).Exec(c.CTX) + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrap(err, "while deleting older community blocklist decisions") + } + deleted += deletedDecisions + + if len(alertItem.Decisions)-i <= decisionBulkSize { + decisionBulk = make([]*ent.DecisionCreate, 0, (len(alertItem.Decisions) - i)) + valueList = make([]string, 0, (len(alertItem.Decisions) - i)) + } else { + decisionBulk = make([]*ent.DecisionCreate, 0, decisionBulkSize) + valueList = make([]string, 0, decisionBulkSize) + } + } + + } + log.Debugf("deleted %d decisions for %s vs %s", deleted, DecOrigin, *alertItem.Decisions[0].Origin) + insertedDecisions, err := txClient.Decision.CreateBulk(decisionBulk...).Save(c.CTX) + if err != nil { + return 0, 0, 0, errors.Wrapf(BulkError, "creating alert decisions: %s", err) + } + inserted += len(insertedDecisions) + /*Deleting older decisions from capi*/ + if len(valueList) > 0 { + deletedDecisions, err := txClient.Decision.Delete(). + Where(decision.And( + decision.OriginEQ(DecOrigin), + decision.Not(decision.HasOwnerWith(alert.IDEQ(alertRef.ID))), + decision.ValueIn(valueList...), + )).Exec(c.CTX) + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrap(err, "while deleting older community blocklist decisions") + } + deleted += deletedDecisions + } + err = txClient.Commit() + if err != nil { + rollbackErr := txClient.Rollback() + if rollbackErr != nil { + log.Errorf("rollback error: %s", rollbackErr) + } + return 0, 0, 0, errors.Wrapf(BulkError, "error committing transaction : %s", err) + } + } + + return alertRef.ID, inserted, deleted, nil +} + +func chunkDecisions(decisions []*ent.Decision, chunkSize int) [][]*ent.Decision { + var ret [][]*ent.Decision + var chunk []*ent.Decision + + for _, d := range decisions { + chunk = append(chunk, d) + if len(chunk) == chunkSize { + ret = append(ret, chunk) + chunk = nil + } + } + if len(chunk) > 0 { + ret = append(ret, chunk) + } + return ret +} + +func (c *Client) CreateAlertBulk(machineId string, alertList []*models.Alert) ([]string, error) { + ret := []string{} + bulkSize := 20 + + c.Log.Debugf("writing %d items", len(alertList)) + bulk := make([]*ent.AlertCreate, 0, bulkSize) + alertDecisions := make([][]*ent.Decision, 0, bulkSize) + for i, alertItem := range alertList { + var decisions []*ent.Decision + var metas []*ent.Meta + var events []*ent.Event + + owner, err := c.QueryMachineByID(machineId) + if err != nil { + if errors.Cause(err) != UserNotExists { + return []string{}, errors.Wrapf(QueryFail, "machine '%s': %s", alertItem.MachineID, err) + } + c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineId) + owner = nil + } + startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) + if err != nil { + c.Log.Errorf("CreateAlertBulk: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) + startAtTime = time.Now().UTC() + } + + stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) + if err != nil { + c.Log.Errorf("CreateAlertBulk: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) + stopAtTime = time.Now().UTC() + } + /*display proper alert in logs*/ + for _, disp := range 
formatAlertAsString(machineId, alertItem) { + c.Log.Info(disp) + } + + //let's track when we strip or drop data, notify outside of loop to avoid spam + stripped := false + dropped := false + + if len(alertItem.Events) > 0 { + eventBulk := make([]*ent.EventCreate, len(alertItem.Events)) + for i, eventItem := range alertItem.Events { + ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) + if err != nil { + c.Log.Errorf("CreateAlertBulk: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) + ts = time.Now().UTC() + } + marshallMetas, err := json.Marshal(eventItem.Meta) + if err != nil { + return []string{}, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) + } + + //the serialized field is too big, let's try to progressively strip it + if event.SerializedValidator(string(marshallMetas)) != nil { + stripped = true + + valid := false + stripSize := 2048 + for !valid && stripSize > 0 { + for _, serializedItem := range eventItem.Meta { + if len(serializedItem.Value) > stripSize*2 { + serializedItem.Value = serializedItem.Value[:stripSize] + "" + } + } + + marshallMetas, err = json.Marshal(eventItem.Meta) + if err != nil { + return []string{}, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) + } + if event.SerializedValidator(string(marshallMetas)) == nil { + valid = true + } + stripSize /= 2 + } + + //nothing worked, drop it + if !valid { + dropped = true + stripped = false + marshallMetas = []byte("") + } + + } + + eventBulk[i] = c.Ent.Event.Create(). + SetTime(ts). + SetSerialized(string(marshallMetas)) + } + if stripped { + c.Log.Warningf("stripped 'serialized' field (machine %s / scenario %s)", machineId, *alertItem.Scenario) + } + if dropped { + c.Log.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineId, *alertItem.Scenario) + } + events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert events: %s", err) + } + } + + if len(alertItem.Meta) > 0 { + metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) + for i, metaItem := range alertItem.Meta { + metaBulk[i] = c.Ent.Meta.Create(). + SetKey(metaItem.Key). + SetValue(metaItem.Value) + } + metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert meta: %s", err) + } + } + + decisions = make([]*ent.Decision, 0) + if len(alertItem.Decisions) > 0 { + decisionBulk := make([]*ent.DecisionCreate, 0, decisionBulkSize) + for i, decisionItem := range alertItem.Decisions { + var start_ip, start_sfx, end_ip, end_sfx int64 + var sz int + + duration, err := time.ParseDuration(*decisionItem.Duration) + if err != nil { + return []string{}, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) + } + + /*if the scope is IP or Range, convert the value to integers */ + if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" { + sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value) + if err != nil { + return []string{}, errors.Wrapf(ParseDurationFail, "invalid addr/range %s : %s", *decisionItem.Value, err) + } + } + + decisionCreate := c.Ent.Decision.Create(). + SetUntil(stopAtTime.Add(duration)). + SetScenario(*decisionItem.Scenario). + SetType(*decisionItem.Type). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). 
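+					// start/end IP and the suffix fields carry the integer encoding of
+					// the decision's address range (computed by types.Addr2Ints above);
+					// storing them this way keeps range containment queries cheap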
+ SetIPSize(int64(sz)). + SetValue(*decisionItem.Value). + SetScope(*decisionItem.Scope). + SetOrigin(*decisionItem.Origin). + SetSimulated(*alertItem.Simulated) + + decisionBulk = append(decisionBulk, decisionCreate) + if len(decisionBulk) == decisionBulkSize { + decisionsCreateRet, err := c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert decisions: %s", err) + + } + decisions = append(decisions, decisionsCreateRet...) + if len(alertItem.Decisions)-i <= decisionBulkSize { + decisionBulk = make([]*ent.DecisionCreate, 0, (len(alertItem.Decisions) - i)) + } else { + decisionBulk = make([]*ent.DecisionCreate, 0, decisionBulkSize) + } + } + } + decisionsCreateRet, err := c.Ent.Decision.CreateBulk(decisionBulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "creating alert decisions: %s", err) + } + decisions = append(decisions, decisionsCreateRet...) + } + + alertB := c.Ent.Alert. + Create(). + SetScenario(*alertItem.Scenario). + SetMessage(*alertItem.Message). + SetEventsCount(*alertItem.EventsCount). + SetStartedAt(startAtTime). + SetStoppedAt(stopAtTime). + SetSourceScope(*alertItem.Source.Scope). + SetSourceValue(*alertItem.Source.Value). + SetSourceIp(alertItem.Source.IP). + SetSourceRange(alertItem.Source.Range). + SetSourceAsNumber(alertItem.Source.AsNumber). + SetSourceAsName(alertItem.Source.AsName). + SetSourceCountry(alertItem.Source.Cn). + SetSourceLatitude(alertItem.Source.Latitude). + SetSourceLongitude(alertItem.Source.Longitude). + SetCapacity(*alertItem.Capacity). + SetLeakSpeed(*alertItem.Leakspeed). + SetSimulated(*alertItem.Simulated). + SetScenarioVersion(*alertItem.ScenarioVersion). + SetScenarioHash(*alertItem.ScenarioHash). + AddEvents(events...). + AddMetas(metas...) 
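+		// note: decisions are not attached via the builder; they are linked to
+		// the alert after the bulk insert, in chunks (see chunkDecisions), to
+		// avoid hitting the SQL variable limit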
+ + if owner != nil { + alertB.SetOwner(owner) + } + bulk = append(bulk, alertB) + alertDecisions = append(alertDecisions, decisions) + + if len(bulk) == bulkSize { + alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "bulk creating alert : %s", err) + } + for alertIndex, a := range alerts { + ret = append(ret, strconv.Itoa(a.ID)) + d := alertDecisions[alertIndex] + decisionsChunk := chunkDecisions(d, bulkSize) + for _, d2 := range decisionsChunk { + _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(c.CTX) + if err != nil { + return []string{}, fmt.Errorf("error while updating decisions: %s", err) + } + } + } + if len(alertList)-i <= bulkSize { + bulk = make([]*ent.AlertCreate, 0, (len(alertList) - i)) + alertDecisions = make([][]*ent.Decision, 0, (len(alertList) - i)) + } else { + bulk = make([]*ent.AlertCreate, 0, bulkSize) + alertDecisions = make([][]*ent.Decision, 0, bulkSize) + } + } + } + + alerts, err := c.Ent.Alert.CreateBulk(bulk...).Save(c.CTX) + if err != nil { + return []string{}, errors.Wrapf(BulkError, "leftovers creating alert : %s", err) + } + + for alertIndex, a := range alerts { + ret = append(ret, strconv.Itoa(a.ID)) + d := alertDecisions[alertIndex] + decisionsChunk := chunkDecisions(d, bulkSize) + for _, d2 := range decisionsChunk { + _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(c.CTX) + if err != nil { + return []string{}, fmt.Errorf("error while updating decisions: %s", err) + } + } + } + + return ret, nil +} + +func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { + predicates := make([]predicate.Alert, 0) + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var hasActiveDecision bool + var ip_sz int + var contains bool = true + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + + /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok { + if v[0] == "false" { + predicates = append(predicates, alert.SimulatedEQ(false)) + } + } + + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } + + for param, value := range filter { + switch param { + case "contains": + contains, err = strconv.ParseBool(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scope": + var scope string = value[0] + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + predicates = append(predicates, alert.SourceScopeEQ(scope)) + case "value": + predicates = append(predicates, alert.SourceValueEQ(value[0])) + case "scenario": + predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) + } + case "since": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + since := time.Now().UTC().Add(-duration) + if since.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", since.String()) + } + 
predicates = append(predicates, alert.StartedAtGTE(since)) + case "created_before": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + since := time.Now().UTC().Add(-duration) + if since.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", since.String()) + } + predicates = append(predicates, alert.CreatedAtLTE(since)) + case "until": + duration, err := types.ParseDuration(value[0]) + if err != nil { + return nil, errors.Wrap(err, "while parsing duration") + } + until := time.Now().UTC().Add(-duration) + if until.IsZero() { + return nil, fmt.Errorf("Empty time now() - %s", until.String()) + } + predicates = append(predicates, alert.StartedAtLTE(until)) + case "decision_type": + predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) + case "origin": + predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) + case "include_capi": //allows to exclude one or more specific origins + if value[0] == "false" { + predicates = append(predicates, alert.HasDecisionsWith(decision.Or(decision.OriginEQ("crowdsec"), decision.OriginEQ("cscli")))) + } else if value[0] != "true" { + log.Errorf("Invalid bool '%s' for include_capi", value[0]) + } + case "has_active_decision": + if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { + return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) + } + if hasActiveDecision { + predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) + } else { + predicates = append(predicates, alert.Not(alert.HasDecisions())) + } + case "limit": + continue + case "sort": + continue + case "simulated": + continue + default: + return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) + } + } + + if ip_sz == 4 { + if contains { /*decision contains {start_ip,end_ip}*/ + predicates = append(predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { /*decision is contained within {start_ip,end_ip}*/ + predicates = append(predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } + } else if ip_sz == 16 { + + if contains { /*decision contains {start_ip,end_ip}*/ + predicates = append(predicates, alert.And( + //matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + //decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + //decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + //decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + )), + alert.Or( + //decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + //decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + //decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { /*decision is contained within {start_ip,end_ip}*/ + predicates = append(predicates, alert.And( + //matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + //decision.start_ip > 
query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + //decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + //decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + )), + alert.Or( + //decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + //decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + //decision.end_suffix <= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } + } else if ip_sz != 0 { + return nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + return predicates, nil +} +func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { + preds, err := AlertPredicatesFromFilter(filter) + if err != nil { + return nil, err + } + return alerts.Where(preds...), nil +} + +func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string]int, error) { + + var res []struct { + Scenario string + Count int + } + + ctx := context.Background() + + query := c.Ent.Alert.Query() + + query, err := BuildAlertRequestFromFilter(query, filters) + + if err != nil { + return nil, errors.Wrap(err, "failed to build alert request") + } + + err = query.GroupBy(alert.FieldScenario).Aggregate(ent.Count()).Scan(ctx, &res) + + if err != nil { + return nil, errors.Wrap(err, "failed to count alerts per scenario") + } + + counts := make(map[string]int) + + for _, r := range res { + counts[r.Scenario] = r.Count + } + + return counts, nil +} + +func (c *Client) TotalAlerts() (int, error) { + return c.Ent.Alert.Query().Count(c.CTX) +} + +func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { + sort := "DESC" // we sort by desc by default + if val, ok := filter["sort"]; ok { + if val[0] != "ASC" && val[0] != "DESC" { + c.Log.Errorf("invalid 'sort' parameter: %s", val) + } else { + sort = val[0] + } + } + limit := defaultLimit + if val, ok := filter["limit"]; ok { + limitConv, err := strconv.Atoi(val[0]) + if err != nil { + return []*ent.Alert{}, errors.Wrapf(QueryFail, "bad limit in parameters: %s", val) + } + limit = limitConv + + } + offset := 0 + ret := make([]*ent.Alert, 0) + for { + alerts := c.Ent.Alert.Query() + alerts, err := BuildAlertRequestFromFilter(alerts, filter) + if err != nil { + return []*ent.Alert{}, err + } + alerts = alerts. + WithDecisions(). + WithEvents(). + WithMetas(). + WithOwner() + + if limit == 0 { + limit, err = alerts.Count(c.CTX) + if err != nil { + return []*ent.Alert{}, fmt.Errorf("unable to count nb alerts: %s", err) + } + } + + if sort == "ASC" { + alerts = alerts.Order(ent.Asc(alert.FieldCreatedAt), ent.Asc(alert.FieldID)) + } else { + alerts = alerts.Order(ent.Desc(alert.FieldCreatedAt), ent.Desc(alert.FieldID)) + } + + result, err := alerts.Limit(paginationSize).Offset(offset).All(c.CTX) + if err != nil { + return []*ent.Alert{}, errors.Wrapf(QueryFail, "pagination size: %d, offset: %d: %s", paginationSize, offset, err) + } + if diff := limit - len(ret); diff < paginationSize { + if len(result) < diff { + ret = append(ret, result...) + c.Log.Debugf("Pagination done, %d < %d", len(result), diff) + break + } + ret = append(ret, result[0:diff]...) + + } else { + ret = append(ret, result...) 
+ } + if len(ret) == limit || len(ret) == 0 || len(ret) < paginationSize { + c.Log.Debugf("Pagination done len(ret) = %d", len(ret)) + break + } + offset += paginationSize + } + + return ret, nil +} + +func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { + idList := make([]int, 0) + for _, alert := range alertItems { + idList = append(idList, alert.ID) + } + + _, err := c.Ent.Event.Delete(). + Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraphBatch : %s", err) + return 0, errors.Wrapf(DeleteFail, "alert graph delete batch events") + } + + _, err = c.Ent.Meta.Delete(). + Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraphBatch : %s", err) + return 0, errors.Wrapf(DeleteFail, "alert graph delete batch meta") + } + + _, err = c.Ent.Decision.Delete(). + Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraphBatch : %s", err) + return 0, errors.Wrapf(DeleteFail, "alert graph delete batch decisions") + } + + deleted, err := c.Ent.Alert.Delete(). + Where(alert.IDIn(idList...)).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraphBatch : %s", err) + return deleted, errors.Wrapf(DeleteFail, "alert graph delete batch") + } + + c.Log.Debug("Done batch delete alerts") + + return deleted, nil +} + +func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { + // delete the associated events + _, err := c.Ent.Event.Delete(). + Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) + } + + // delete the associated meta + _, err = c.Ent.Meta.Delete(). + Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteAlertGraph : %s", err) + return errors.Wrapf(DeleteFail, "meta with alert ID '%d'", alertItem.ID) + } + + // delete the associated decisions + _, err = c.Ent.Decision.Delete(). 
+		Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX)
+	if err != nil {
+		c.Log.Warningf("DeleteAlertGraph : %s", err)
+		return errors.Wrapf(DeleteFail, "decision with alert ID '%d'", alertItem.ID)
+	}
+
+	// delete the alert
+	err = c.Ent.Alert.DeleteOne(alertItem).Exec(c.CTX)
+	if err != nil {
+		c.Log.Warningf("DeleteAlertGraph : %s", err)
+		return errors.Wrapf(DeleteFail, "alert with ID '%d'", alertItem.ID)
+	}
+
+	return nil
+}
+
+func (c *Client) DeleteAlertByID(id int) error {
+	alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(c.CTX)
+	if err != nil {
+		return err
+	}
+
+	return c.DeleteAlertGraph(alertItem)
+}
+
+func (c *Client) DeleteAlertWithFilter(filter map[string][]string) (int, error) {
+	preds, err := AlertPredicatesFromFilter(filter)
+	if err != nil {
+		return 0, err
+	}
+	return c.Ent.Alert.Delete().Where(preds...).Exec(c.CTX)
+}
+
+func (c *Client) FlushOrphans() {
+	/* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */
+	/* We want to take care of orphaned events for which the parent alert/decision has been deleted */
+
+	events_count, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(c.CTX)
+	if err != nil {
+		c.Log.Warningf("error while deleting orphan events : %s", err)
+		return
+	}
+	if events_count > 0 {
+		c.Log.Infof("%d deleted orphan events", events_count)
+	}
+
+	events_count, err = c.Ent.Decision.Delete().Where(
+		decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX)
+
+	if err != nil {
+		c.Log.Warningf("error while deleting orphan decisions : %s", err)
+		return
+	}
+	if events_count > 0 {
+		c.Log.Infof("%d deleted orphan decisions", events_count)
+	}
+}
+
+func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error {
+	log.Debug("starting FlushAgentsAndBouncers")
+	if bouncersCfg != nil {
+		if bouncersCfg.ApiDuration != nil {
+			log.Debug("trying to delete old bouncers from api")
+			deletionCount, err := c.Ent.Bouncer.Delete().Where(
+				bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.ApiDuration)),
+			).Where(
+				bouncer.AuthTypeEQ(types.ApiKeyAuthType),
+			).Exec(c.CTX)
+			if err != nil {
+				c.Log.Errorf("while auto-deleting expired bouncers (api key) : %s", err)
+			} else if deletionCount > 0 {
+				c.Log.Infof("deleted %d expired bouncers (api auth)", deletionCount)
+			}
+		}
+		if bouncersCfg.CertDuration != nil {
+			log.Debug("trying to delete old bouncers from cert")
+
+			deletionCount, err := c.Ent.Bouncer.Delete().Where(
+				bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.CertDuration)),
+			).Where(
+				bouncer.AuthTypeEQ(types.TlsAuthType),
+			).Exec(c.CTX)
+			if err != nil {
+				c.Log.Errorf("while auto-deleting expired bouncers (cert) : %s", err)
+			} else if deletionCount > 0 {
+				c.Log.Infof("deleted %d expired bouncers (cert auth)", deletionCount)
+			}
+		}
+	}
+
+	if agentsCfg != nil {
+		if agentsCfg.CertDuration != nil {
+			log.Debug("trying to delete old agents from cert")
+
+			deletionCount, err := c.Ent.Machine.Delete().Where(
+				machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.CertDuration)),
+			).Where(
+				machine.Not(machine.HasAlerts()),
+			).Where(
+				machine.AuthTypeEQ(types.TlsAuthType),
+			).Exec(c.CTX)
+			log.Debugf("deleted %d entries", deletionCount)
+			if err != nil {
+				c.Log.Errorf("while auto-deleting expired machines (cert) : %s", err)
+			} else if deletionCount > 0 {
+				c.Log.Infof("deleted %d expired machines (cert auth)", deletionCount)
+			}
+		}
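+
+		// Agents that still own alerts are protected by the
+		// machine.Not(machine.HasAlerts()) guard above and are never
+		// auto-deleted, whatever their last heartbeat; the login/password
+		// branch below applies the same guard. A hedged config sketch
+		// (hypothetical YAML keys, not taken from this patch) that would
+		// populate these durations via StartFlushScheduler:
+		//
+		//	flush:
+		//	  agents_autodelete:
+		//	    cert: 60m
+		//	    login_password: 60m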
+		if agentsCfg.LoginPasswordDuration != nil {
+			log.Debug("trying to delete old agents from password")
+
+			deletionCount, err := c.Ent.Machine.Delete().Where(
+				machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.LoginPasswordDuration)),
+			).Where(
+				machine.Not(machine.HasAlerts()),
+			).Where(
+				machine.AuthTypeEQ(types.PasswordAuthType),
+			).Exec(c.CTX)
+			log.Debugf("deleted %d entries", deletionCount)
+			if err != nil {
+				c.Log.Errorf("while auto-deleting expired machines (password) : %s", err)
+			} else if deletionCount > 0 {
+				c.Log.Infof("deleted %d expired machines (password auth)", deletionCount)
+			}
+		}
+	}
+	return nil
+}
+
+func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error {
+	var deletedByAge int
+	var deletedByNbItem int
+	var totalAlerts int
+	var err error
+
+	if !c.CanFlush {
+		c.Log.Debug("a list is being imported, flushing later")
+		return nil
+	}
+
+	c.Log.Debug("Flushing orphan alerts")
+	c.FlushOrphans()
+	c.Log.Debug("Done flushing orphan alerts")
+	totalAlerts, err = c.TotalAlerts()
+	if err != nil {
+		c.Log.Warningf("FlushAlerts (max items count) : %s", err)
+		return errors.Wrap(err, "unable to get alerts count")
+	}
+	c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts)
+	if MaxAge != "" {
+		filter := map[string][]string{
+			"created_before": {MaxAge},
+		}
+		nbDeleted, err := c.DeleteAlertWithFilter(filter)
+		if err != nil {
+			c.Log.Warningf("FlushAlerts (max age) : %s", err)
+			return errors.Wrapf(err, "unable to flush alerts created before: %s", MaxAge)
+		}
+		c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted)
+		deletedByAge = nbDeleted
+	}
+	if MaxItems > 0 {
+		//We get the highest id for the alerts
+		//We subtract MaxItems to avoid deleting alerts that are not old enough
+		//This gives us the oldest alert that we want to keep
+		//We then delete all the alerts with an id lower than this one
+		//We can do this because the id is auto-increment, and the database won't reuse the same id twice
+		lastAlert, err := c.QueryAlertWithFilter(map[string][]string{
+			"sort":  {"DESC"},
+			"limit": {"1"},
+		})
+		c.Log.Debugf("FlushAlerts (last alert): %+v", lastAlert)
+		if err != nil {
+			c.Log.Errorf("FlushAlerts: could not get last alert: %s", err)
+			return errors.Wrap(err, "could not get last alert")
+		}
+
+		if len(lastAlert) != 0 {
+			maxid := lastAlert[0].ID - MaxItems
+
+			c.Log.Debugf("FlushAlerts (max id): %d", maxid)
+
+			if maxid > 0 {
+				//This may lead to orphan alerts (at least on MySQL), but the next time the flush job runs, they will be deleted
+				deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX)
+
+				if err != nil {
+					c.Log.Errorf("FlushAlerts: could not delete alerts : %s", err)
+					return errors.Wrap(err, "could not delete alerts")
+				}
+			}
+		}
+	}
+	if deletedByNbItem > 0 {
+		c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", deletedByNbItem, totalAlerts, MaxItems)
+	}
+	if deletedByAge > 0 {
+		c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", deletedByAge, totalAlerts, MaxAge)
+	}
+	return nil
+}
+
+func (c *Client) GetAlertByID(alertID int) (*ent.Alert, error) {
+	alert, err := c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(c.CTX)
+	if err != nil {
+		/*record not found, 404*/
+		if ent.IsNotFound(err) {
+			log.Warningf("GetAlertByID (not found): %s", err)
+			return &ent.Alert{}, ItemNotFound
+		}
+		c.Log.Warningf("GetAlertByID : %s", err)
+		return &ent.Alert{}, QueryFail
+	}
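+
+	// Illustrative caller-side handling (a sketch, not from the original
+	// code): ItemNotFound is returned as-is above, so a caller can test
+	// for it directly, e.g.
+	//
+	//	alertItem, err := dbClient.GetAlertByID(42)
+	//	if err == ItemNotFound {
+	//		// map to an HTTP 404
+	//	}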
+ return alert, nil +} diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go new file mode 100644 index 0000000..4cd32d8 --- /dev/null +++ b/pkg/database/bouncers.go @@ -0,0 +1,95 @@ +package database + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/pkg/errors" +) + +func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) + if err != nil { + return &ent.Bouncer{}, errors.Wrapf(QueryFail, "select bouncer: %s", err) + } + + return result, nil +} + +func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(c.CTX) + if err != nil { + return &ent.Bouncer{}, errors.Wrapf(QueryFail, "select bouncer: %s", err) + } + + return result, nil +} + +func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().All(c.CTX) + if err != nil { + return []*ent.Bouncer{}, errors.Wrapf(QueryFail, "listing bouncer: %s", err) + } + return result, nil +} + +func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { + bouncer, err := c.Ent.Bouncer. + Create(). + SetName(name). + SetAPIKey(apiKey). + SetRevoked(false). + SetAuthType(authType). + Save(c.CTX) + if err != nil { + if ent.IsConstraintError(err) { + return nil, fmt.Errorf("bouncer %s already exists", name) + } + return nil, fmt.Errorf("unable to create bouncer: %s", err) + } + return bouncer, nil +} + +func (c *Client) DeleteBouncer(name string) error { + nbDeleted, err := c.Ent.Bouncer. + Delete(). + Where(bouncer.NameEQ(name)). + Exec(c.CTX) + if err != nil { + return err + } + + if nbDeleted == 0 { + return fmt.Errorf("bouncer doesn't exist") + } + + return nil +} + +func (c *Client) UpdateBouncerLastPull(lastPull time.Time, ID int) error { + _, err := c.Ent.Bouncer.UpdateOneID(ID). + SetLastPull(lastPull). 
+		Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update bouncer last pull in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) UpdateBouncerIP(ipAddr string, ID int) error {
+	_, err := c.Ent.Bouncer.UpdateOneID(ID).SetIPAddress(ipAddr).Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update bouncer ip address in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, ID int) error {
+	_, err := c.Ent.Bouncer.UpdateOneID(ID).SetVersion(version).SetType(bType).Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update bouncer type and version in database: %s", err)
+	}
+	return nil
+}
diff --git a/pkg/database/database.go b/pkg/database/database.go
new file mode 100644
index 0000000..71ce076
--- /dev/null
+++ b/pkg/database/database.go
@@ -0,0 +1,190 @@
+package database
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"os"
+	"time"
+
+	"entgo.io/ent/dialect"
+	entsql "entgo.io/ent/dialect/sql"
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/go-co-op/gocron"
+	_ "github.com/go-sql-driver/mysql"
+	_ "github.com/jackc/pgx/v4/stdlib"
+	_ "github.com/lib/pq"
+	_ "github.com/mattn/go-sqlite3"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+)
+
+type Client struct {
+	Ent      *ent.Client
+	CTX      context.Context
+	Log      *log.Logger
+	CanFlush bool
+	Type     string
+	WalMode  *bool
+}
+
+func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig.DatabaseCfg) (*entsql.Driver, error) {
+	db, err := sql.Open(dbtype, dsn)
+	if err != nil {
+		return nil, err
+	}
+	if config.MaxOpenConns == nil {
+		log.Warningf("MaxOpenConns is not set, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS)
+		config.MaxOpenConns = types.IntPtr(csconfig.DEFAULT_MAX_OPEN_CONNS)
+	}
+	db.SetMaxOpenConns(*config.MaxOpenConns)
+	drv := entsql.OpenDB(dbdialect, db)
+	return drv, nil
+}
+
+func NewClient(config *csconfig.DatabaseCfg) (*Client, error) {
+	var client *ent.Client
+	var err error
+	if config == nil {
+		return &Client{}, fmt.Errorf("DB config is empty")
+	}
+	/*The logger that will be used by db operations*/
+	clog := log.New()
+	if err := types.ConfigureLogger(clog); err != nil {
+		return nil, errors.Wrap(err, "while configuring db logger")
+	}
+	if config.LogLevel != nil {
+		clog.SetLevel(*config.LogLevel)
+	}
+	entLogger := clog.WithField("context", "ent")
+
+	entOpt := ent.Log(entLogger.Debug)
+	switch config.Type {
+	case "sqlite":
+		/*if it's the first startup, we want to touch and chmod the file*/
+		if _, err := os.Stat(config.DbPath); os.IsNotExist(err) {
+			f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0600)
+			if err != nil {
+				return &Client{}, errors.Wrapf(err, "failed to create SQLite database file %q", config.DbPath)
+			}
+			if err := f.Close(); err != nil {
+				return &Client{}, errors.Wrapf(err, "failed to close SQLite database file %q", config.DbPath)
+			}
+		}
+		//Always try to set permissions to simplify the code a bit for windows (as the permissions set by OpenFile will be garbage)
+		if err := setFilePerm(config.DbPath, 0600); err != nil {
+			return &Client{}, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err)
+		}
+		var sqliteConnectionStringParameters string
+		if config.UseWal != nil && *config.UseWal {
+			sqliteConnectionStringParameters = "_busy_timeout=100000&_fk=1&_journal_mode=WAL"
+		} else {
+			sqliteConnectionStringParameters = "_busy_timeout=100000&_fk=1"
"_busy_timeout=100000&_fk=1" + } + drv, err := getEntDriver("sqlite3", dialect.SQLite, fmt.Sprintf("file:%s?%s", config.DbPath, sqliteConnectionStringParameters), config) + if err != nil { + return &Client{}, errors.Wrapf(err, "failed opening connection to sqlite: %v", config.DbPath) + } + client = ent.NewClient(ent.Driver(drv), entOpt) + case "mysql": + drv, err := getEntDriver("mysql", dialect.MySQL, fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", config.User, config.Password, config.Host, config.Port, config.DbName), config) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to mysql: %v", err) + } + client = ent.NewClient(ent.Driver(drv), entOpt) + case "postgres", "postgresql": + drv, err := getEntDriver("postgres", dialect.Postgres, fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s sslmode=%s", config.Host, config.Port, config.User, config.DbName, config.Password, config.Sslmode), config) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to postgresql: %v", err) + } + client = ent.NewClient(ent.Driver(drv), entOpt) + case "pgx": + drv, err := getEntDriver("pgx", dialect.Postgres, fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=%s", config.User, config.Password, config.Host, config.Port, config.DbName, config.Sslmode), config) + if err != nil { + return &Client{}, fmt.Errorf("failed opening connection to pgx: %v", err) + } + client = ent.NewClient(ent.Driver(drv), entOpt) + default: + return &Client{}, fmt.Errorf("unknown database type '%s'", config.Type) + } + + if config.LogLevel != nil && *config.LogLevel >= log.DebugLevel { + clog.Debugf("Enabling request debug") + client = client.Debug() + } + if err = client.Schema.Create(context.Background()); err != nil { + return nil, fmt.Errorf("failed creating schema resources: %v", err) + } + return &Client{Ent: client, CTX: context.Background(), Log: clog, CanFlush: true, Type: config.Type, WalMode: config.UseWal}, nil +} + +func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { + maxItems := 0 + maxAge := "" + if config.MaxItems != nil && *config.MaxItems <= 0 { + return nil, fmt.Errorf("max_items can't be zero or negative number") + } + if config.MaxItems != nil { + maxItems = *config.MaxItems + } + if config.MaxAge != nil && *config.MaxAge != "" { + maxAge = *config.MaxAge + } + + // Init & Start cronjob every minute for alerts + scheduler := gocron.NewScheduler(time.UTC) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + if err != nil { + return nil, errors.Wrap(err, "while starting FlushAlerts scheduler") + } + job.SingletonMode() + // Init & Start cronjob every hour for bouncers/agents + if config.AgentsGC != nil { + if config.AgentsGC.Cert != nil { + duration, err := types.ParseDuration(*config.AgentsGC.Cert) + if err != nil { + return nil, errors.Wrap(err, "while parsing agents cert auto-delete duration") + } + config.AgentsGC.CertDuration = &duration + } + if config.AgentsGC.LoginPassword != nil { + duration, err := types.ParseDuration(*config.AgentsGC.LoginPassword) + if err != nil { + return nil, errors.Wrap(err, "while parsing agents login/password auto-delete duration") + } + config.AgentsGC.LoginPasswordDuration = &duration + } + if config.AgentsGC.Api != nil { + log.Warning("agents auto-delete for API auth is not supported (use cert or login_password)") + } + } + if config.BouncersGC != nil { + if config.BouncersGC.Cert != nil { + duration, err := types.ParseDuration(*config.BouncersGC.Cert) + 
if err != nil { + return nil, errors.Wrap(err, "while parsing bouncers cert auto-delete duration") + } + config.BouncersGC.CertDuration = &duration + } + if config.BouncersGC.Api != nil { + duration, err := types.ParseDuration(*config.BouncersGC.Api) + if err != nil { + return nil, errors.Wrap(err, "while parsing bouncers api auto-delete duration") + } + config.BouncersGC.ApiDuration = &duration + } + if config.BouncersGC.LoginPassword != nil { + log.Warning("bouncers auto-delete for login/password auth is not supported (use cert or api)") + } + } + baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + if err != nil { + return nil, errors.Wrap(err, "while starting FlushAgentsAndBouncers scheduler") + } + baJob.SingletonMode() + scheduler.StartAsync() + + return scheduler, nil +} diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go new file mode 100644 index 0000000..6056996 --- /dev/null +++ b/pkg/database/decisions.go @@ -0,0 +1,652 @@ +package database + +import ( + "fmt" + "strings" + "time" + + "strconv" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" +) + +type DecisionsByScenario struct { + Scenario string + Count int + Origin string + Type string +} + +func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string][]string) (*ent.DecisionQuery, error) { + + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + var contains bool = true + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + + /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok { + if v[0] == "false" { + query = query.Where(decision.SimulatedEQ(false)) + } + delete(filter, "simulated") + } else { + query = query.Where(decision.SimulatedEQ(false)) + } + + for param, value := range filter { + switch param { + case "contains": + contains, err = strconv.ParseBool(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scopes": + scopes := strings.Split(value[0], ",") + for i, scope := range scopes { + switch strings.ToLower(scope) { + case "ip": + scopes[i] = types.Ip + case "range": + scopes[i] = types.Range + case "country": + scopes[i] = types.Country + case "as": + scopes[i] = types.AS + } + } + query = query.Where(decision.ScopeIn(scopes...)) + case "value": + query = query.Where(decision.ValueEQ(value[0])) + case "type": + query = query.Where(decision.TypeEQ(value[0])) + case "origins": + query = query.Where( + decision.OriginIn(strings.Split(value[0], ",")...), + ) + case "scenarios_containing": + predicates := decisionPredicatesFromStr(value[0], decision.ScenarioContainsFold) + query = query.Where(decision.Or(predicates...)) + case "scenarios_not_containing": + predicates := decisionPredicatesFromStr(value[0], decision.ScenarioContainsFold) + query = query.Where(decision.Not( + decision.Or( + predicates..., + ), + )) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return nil, 
errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) + } + } + } + query, err = applyStartIpEndIpFilter(query, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return nil, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") + } + return query, nil +} +func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { + query := c.Ent.Decision.Query().Where( + decision.UntilGT(time.Now().UTC()), + ) + //Allow a bouncer to ask for non-deduplicated results + if v, ok := filters["dedup"]; !ok || v[0] != "false" { + query = query.Where(longestDecisionForScopeTypeValue) + } + + query, err := BuildDecisionRequestWithFilter(query, filters) + + if err != nil { + c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") + } + + data, err := query.All(c.CTX) + if err != nil { + c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") + } + return data, nil +} + +func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { + query := c.Ent.Decision.Query().Where( + decision.UntilLT(time.Now().UTC()), + ) + //Allow a bouncer to ask for non-deduplicated results + if v, ok := filters["dedup"]; !ok || v[0] != "false" { + query = query.Where(longestDecisionForScopeTypeValue) + } + + query, err := BuildDecisionRequestWithFilter(query, filters) + + if err != nil { + c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") + } + data, err := query.All(c.CTX) + if err != nil { + c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") + } + return data, nil +} + +func (c *Client) QueryDecisionCountByScenario(filters map[string][]string) ([]*DecisionsByScenario, error) { + query := c.Ent.Decision.Query().Where( + decision.UntilGT(time.Now().UTC()), + ) + query, err := BuildDecisionRequestWithFilter(query, filters) + + if err != nil { + c.Log.Warningf("QueryDecisionCountByScenario : %s", err) + return nil, errors.Wrap(QueryFail, "count all decisions with filters") + } + + var r []*DecisionsByScenario + + err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) + + if err != nil { + c.Log.Warningf("QueryDecisionCountByScenario : %s", err) + return nil, errors.Wrap(QueryFail, "count all decisions with filters") + } + + return r, nil +} + +func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Decision, error) { + var data []*ent.Decision + var err error + + decisions := c.Ent.Decision.Query(). 
+ Where(decision.UntilGTE(time.Now().UTC())) + + decisions, err = BuildDecisionRequestWithFilter(decisions, filter) + if err != nil { + return []*ent.Decision{}, err + } + + err = decisions.Select( + decision.FieldID, + decision.FieldUntil, + decision.FieldScenario, + decision.FieldType, + decision.FieldStartIP, + decision.FieldEndIP, + decision.FieldValue, + decision.FieldScope, + decision.FieldOrigin, + ).Scan(c.CTX, &data) + if err != nil { + c.Log.Warningf("QueryDecisionWithFilter : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "query decision failed") + } + + return data, nil +} + +// ent translation of https://stackoverflow.com/a/28090544 +func longestDecisionForScopeTypeValue(s *sql.Selector) { + t := sql.Table(decision.Table) + s.LeftJoin(t).OnP(sql.And( + sql.ColumnsEQ( + t.C(decision.FieldValue), + s.C(decision.FieldValue), + ), + sql.ColumnsEQ( + t.C(decision.FieldType), + s.C(decision.FieldType), + ), + sql.ColumnsEQ( + t.C(decision.FieldScope), + s.C(decision.FieldScope), + ), + sql.ColumnsGT( + t.C(decision.FieldUntil), + s.C(decision.FieldUntil), + ), + )) + s.Where( + sql.IsNull( + t.C(decision.FieldUntil), + ), + ) +} + +func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { + query := c.Ent.Decision.Query().Where( + decision.UntilLT(time.Now().UTC()), + decision.UntilGT(since), + ) + //Allow a bouncer to ask for non-deduplicated results + if v, ok := filters["dedup"]; !ok || v[0] != "false" { + query = query.Where(longestDecisionForScopeTypeValue) + } + query, err := BuildDecisionRequestWithFilter(query, filters) + if err != nil { + c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions with filters") + } + + data, err := query.All(c.CTX) + if err != nil { + c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions with filters") + } + + return data, nil +} + +func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { + query := c.Ent.Decision.Query().Where( + decision.CreatedAtGT(since), + decision.UntilGT(time.Now().UTC()), + ) + //Allow a bouncer to ask for non-deduplicated results + if v, ok := filters["dedup"]; !ok || v[0] != "false" { + query = query.Where(longestDecisionForScopeTypeValue) + } + query, err := BuildDecisionRequestWithFilter(query, filters) + if err != nil { + c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) + } + data, err := query.All(c.CTX) + if err != nil { + c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) + return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) + } + return data, nil +} + +func (c *Client) DeleteDecisionById(decisionId int) error { + err := c.Ent.Decision.DeleteOneID(decisionId).Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteDecisionById : %s", err) + return errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId) + } + return nil +} + +func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + var contains bool = true + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans 
that are *contained* by the given value (value is the outer) */ + + decisions := c.Ent.Decision.Delete() + for param, value := range filter { + switch param { + case "contains": + contains, err = strconv.ParseBool(value[0]) + if err != nil { + return "0", errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scope": + decisions = decisions.Where(decision.ScopeEQ(value[0])) + case "value": + decisions = decisions.Where(decision.ValueEQ(value[0])) + case "type": + decisions = decisions.Where(decision.TypeEQ(value[0])) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) + } + case "scenario": + decisions = decisions.Where(decision.ScenarioEQ(value[0])) + default: + return "0", errors.Wrap(InvalidFilter, fmt.Sprintf("'%s' doesn't exist", param)) + } + } + + if ip_sz == 4 { + if contains { /*decision contains {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + decision.StartIPLTE(start_ip), + decision.EndIPGTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)), + )) + } else { /*decision is contained within {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + decision.StartIPGTE(start_ip), + decision.EndIPLTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)), + )) + } + } else if ip_sz == 16 { + if contains { /*decision contains {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + //matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + //decision.start_ip < query.start_ip + decision.StartIPLT(start_ip), + decision.And( + //decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + //decision.start_suffix <= query.start_suffix + decision.StartSuffixLTE(start_sfx), + )), + decision.Or( + //decision.end_ip > query.end_ip + decision.EndIPGT(end_ip), + decision.And( + //decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + //decision.end_suffix >= query.end_suffix + decision.EndSuffixGTE(end_sfx), + ), + ), + )) + } else { + decisions = decisions.Where(decision.And( + //matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + //decision.start_ip > query.start_ip + decision.StartIPGT(start_ip), + decision.And( + //decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + //decision.start_suffix >= query.start_suffix + decision.StartSuffixGTE(start_sfx), + )), + decision.Or( + //decision.end_ip < query.end_ip + decision.EndIPLT(end_ip), + decision.And( + //decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + //decision.end_suffix <= query.end_suffix + decision.EndSuffixLTE(end_sfx), + ), + ), + )) + } + } else if ip_sz != 0 { + return "0", errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + nbDeleted, err := decisions.Exec(c.CTX) + if err != nil { + c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) + return "0", errors.Wrap(DeleteFail, "decisions with provided filter") + } + return strconv.Itoa(nbDeleted), nil +} + +// SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter +func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + var contains bool = true + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + decisions := 
c.Ent.Decision.Update().Where(decision.UntilGT(time.Now().UTC())) + for param, value := range filter { + switch param { + case "contains": + contains, err = strconv.ParseBool(value[0]) + if err != nil { + return "0", errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scopes": + decisions = decisions.Where(decision.ScopeEQ(value[0])) + case "origin": + decisions = decisions.Where(decision.OriginEQ(value[0])) + case "value": + decisions = decisions.Where(decision.ValueEQ(value[0])) + case "type": + decisions = decisions.Where(decision.TypeEQ(value[0])) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return "0", errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) + } + case "scenario": + decisions = decisions.Where(decision.ScenarioEQ(value[0])) + default: + return "0", errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) + } + } + if ip_sz == 4 { + if contains { + /*Decision contains {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + decision.StartIPLTE(start_ip), + decision.EndIPGTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)), + )) + } else { + /*Decision is contained within {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + decision.StartIPGTE(start_ip), + decision.EndIPLTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)), + )) + } + } else if ip_sz == 16 { + /*decision contains {start_ip,end_ip}*/ + if contains { + decisions = decisions.Where(decision.And( + //matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + //decision.start_ip < query.start_ip + decision.StartIPLT(start_ip), + decision.And( + //decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + //decision.start_suffix <= query.start_suffix + decision.StartSuffixLTE(start_sfx), + )), + decision.Or( + //decision.end_ip > query.end_ip + decision.EndIPGT(end_ip), + decision.And( + //decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + //decision.end_suffix >= query.end_suffix + decision.EndSuffixGTE(end_sfx), + ), + ), + )) + } else { + /*decision is contained within {start_ip,end_ip}*/ + decisions = decisions.Where(decision.And( + //matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + //decision.start_ip > query.start_ip + decision.StartIPGT(start_ip), + decision.And( + //decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + //decision.start_suffix >= query.start_suffix + decision.StartSuffixGTE(start_sfx), + )), + decision.Or( + //decision.end_ip < query.end_ip + decision.EndIPLT(end_ip), + decision.And( + //decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + //decision.end_suffix <= query.end_suffix + decision.EndSuffixLTE(end_sfx), + ), + ), + )) + } + } else if ip_sz != 0 { + return "0", errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + nbDeleted, err := decisions.SetUntil(time.Now().UTC()).Save(c.CTX) + if err != nil { + c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) + return "0", errors.Wrap(DeleteFail, "soft delete decisions with provided filter") + } + return strconv.Itoa(nbDeleted), nil +} + +// SoftDeleteDecisionByID set the expiration of a decision to now() +func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, error) { + nbUpdated, err := c.Ent.Decision.Update().Where(decision.IDEQ(decisionID)).SetUntil(time.Now().UTC()).Save(c.CTX) + if err != nil || nbUpdated == 0 { + c.Log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, nbUpdated) 
+		if err != nil {
+			return 0, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID)
+		}
+		return 0, ItemNotFound
+	}
+	return nbUpdated, nil
+}
+
+func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) {
+	var err error
+	var start_ip, start_sfx, end_ip, end_sfx int64
+	var ip_sz, count int
+	ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue)
+
+	if err != nil {
+		return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err)
+	}
+
+	contains := true
+	decisions := c.Ent.Decision.Query()
+	decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx)
+	if err != nil {
+		return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter")
+	}
+
+	count, err = decisions.Count(c.CTX)
+	if err != nil {
+		return 0, errors.Wrapf(err, "fail to count decisions")
+	}
+
+	return count, nil
+}
+
+func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) {
+	var err error
+	var start_ip, start_sfx, end_ip, end_sfx int64
+	var ip_sz, count int
+	ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue)
+
+	if err != nil {
+		return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err)
+	}
+
+	contains := true
+	decisions := c.Ent.Decision.Query().Where(
+		decision.CreatedAtGT(since),
+	)
+	decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx)
+	if err != nil {
+		return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter")
+	}
+	count, err = decisions.Count(c.CTX)
+	if err != nil {
+		return 0, errors.Wrapf(err, "fail to count decisions")
+	}
+
+	return count, nil
+}
+
+func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) {
+	if ip_sz == 4 {
+		if contains {
+			/*Decision contains {start_ip,end_ip}*/
+			decisions = decisions.Where(decision.And(
+				decision.StartIPLTE(start_ip),
+				decision.EndIPGTE(end_ip),
+				decision.IPSizeEQ(int64(ip_sz)),
+			))
+		} else {
+			/*Decision is contained within {start_ip,end_ip}*/
+			decisions = decisions.Where(decision.And(
+				decision.StartIPGTE(start_ip),
+				decision.EndIPLTE(end_ip),
+				decision.IPSizeEQ(int64(ip_sz)),
+			))
+		}
+	} else if ip_sz == 16 {
+		/*decision contains {start_ip,end_ip}*/
+		if contains {
+			decisions = decisions.Where(decision.And(
+				//matching addr size
+				decision.IPSizeEQ(int64(ip_sz)),
+				decision.Or(
+					//decision.start_ip < query.start_ip
+					decision.StartIPLT(start_ip),
+					decision.And(
+						//decision.start_ip == query.start_ip
+						decision.StartIPEQ(start_ip),
+						//decision.start_suffix <= query.start_suffix
+						decision.StartSuffixLTE(start_sfx),
+					)),
+				decision.Or(
+					//decision.end_ip > query.end_ip
+					decision.EndIPGT(end_ip),
+					decision.And(
+						//decision.end_ip == query.end_ip
+						decision.EndIPEQ(end_ip),
+						//decision.end_suffix >= query.end_suffix
+						decision.EndSuffixGTE(end_sfx),
+					),
+				),
+			))
+		} else {
+			/*decision is contained within {start_ip,end_ip}*/
+			decisions = decisions.Where(decision.And(
+				//matching addr size
+				decision.IPSizeEQ(int64(ip_sz)),
+				decision.Or(
+					//decision.start_ip > query.start_ip
+					decision.StartIPGT(start_ip),
+					decision.And(
+						//decision.start_ip == query.start_ip
+						decision.StartIPEQ(start_ip),
+						//decision.start_suffix >= query.start_suffix
+						decision.StartSuffixGTE(start_sfx),
+					)),
+				decision.Or(
//decision.end_ip < query.end_ip + decision.EndIPLT(end_ip), + decision.And( + //decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + //decision.end_suffix <= query.end_suffix + decision.EndSuffixLTE(end_sfx), + ), + ), + )) + } + } else if ip_sz != 0 { + return nil, errors.Wrapf(InvalidFilter, "unknown ip size %d", ip_sz) + } + return decisions, nil +} + +func decisionPredicatesFromStr(s string, predicateFunc func(string) predicate.Decision) []predicate.Decision { + words := strings.Split(s, ",") + predicates := make([]predicate.Decision, len(words)) + for i, word := range words { + predicates[i] = predicateFunc(word) + } + return predicates +} diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go new file mode 100644 index 0000000..14f3644 --- /dev/null +++ b/pkg/database/ent/alert.go @@ -0,0 +1,432 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" +) + +// Alert is the model entity for the Alert schema. +type Alert struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt *time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt *time.Time `json:"updated_at,omitempty"` + // Scenario holds the value of the "scenario" field. + Scenario string `json:"scenario,omitempty"` + // BucketId holds the value of the "bucketId" field. + BucketId string `json:"bucketId,omitempty"` + // Message holds the value of the "message" field. + Message string `json:"message,omitempty"` + // EventsCount holds the value of the "eventsCount" field. + EventsCount int32 `json:"eventsCount,omitempty"` + // StartedAt holds the value of the "startedAt" field. + StartedAt time.Time `json:"startedAt,omitempty"` + // StoppedAt holds the value of the "stoppedAt" field. + StoppedAt time.Time `json:"stoppedAt,omitempty"` + // SourceIp holds the value of the "sourceIp" field. + SourceIp string `json:"sourceIp,omitempty"` + // SourceRange holds the value of the "sourceRange" field. + SourceRange string `json:"sourceRange,omitempty"` + // SourceAsNumber holds the value of the "sourceAsNumber" field. + SourceAsNumber string `json:"sourceAsNumber,omitempty"` + // SourceAsName holds the value of the "sourceAsName" field. + SourceAsName string `json:"sourceAsName,omitempty"` + // SourceCountry holds the value of the "sourceCountry" field. + SourceCountry string `json:"sourceCountry,omitempty"` + // SourceLatitude holds the value of the "sourceLatitude" field. + SourceLatitude float32 `json:"sourceLatitude,omitempty"` + // SourceLongitude holds the value of the "sourceLongitude" field. + SourceLongitude float32 `json:"sourceLongitude,omitempty"` + // SourceScope holds the value of the "sourceScope" field. + SourceScope string `json:"sourceScope,omitempty"` + // SourceValue holds the value of the "sourceValue" field. + SourceValue string `json:"sourceValue,omitempty"` + // Capacity holds the value of the "capacity" field. + Capacity int32 `json:"capacity,omitempty"` + // LeakSpeed holds the value of the "leakSpeed" field. + LeakSpeed string `json:"leakSpeed,omitempty"` + // ScenarioVersion holds the value of the "scenarioVersion" field. + ScenarioVersion string `json:"scenarioVersion,omitempty"` + // ScenarioHash holds the value of the "scenarioHash" field. 
+ ScenarioHash string `json:"scenarioHash,omitempty"` + // Simulated holds the value of the "simulated" field. + Simulated bool `json:"simulated,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AlertQuery when eager-loading is set. + Edges AlertEdges `json:"edges"` + machine_alerts *int +} + +// AlertEdges holds the relations/edges for other nodes in the graph. +type AlertEdges struct { + // Owner holds the value of the owner edge. + Owner *Machine `json:"owner,omitempty"` + // Decisions holds the value of the decisions edge. + Decisions []*Decision `json:"decisions,omitempty"` + // Events holds the value of the events edge. + Events []*Event `json:"events,omitempty"` + // Metas holds the value of the metas edge. + Metas []*Meta `json:"metas,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [4]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AlertEdges) OwnerOrErr() (*Machine, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: machine.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// DecisionsOrErr returns the Decisions value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) DecisionsOrErr() ([]*Decision, error) { + if e.loadedTypes[1] { + return e.Decisions, nil + } + return nil, &NotLoadedError{edge: "decisions"} +} + +// EventsOrErr returns the Events value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) EventsOrErr() ([]*Event, error) { + if e.loadedTypes[2] { + return e.Events, nil + } + return nil, &NotLoadedError{edge: "events"} +} + +// MetasOrErr returns the Metas value or an error if the edge +// was not loaded in eager-loading. +func (e AlertEdges) MetasOrErr() ([]*Meta, error) { + if e.loadedTypes[3] { + return e.Metas, nil + } + return nil, &NotLoadedError{edge: "metas"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Alert) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case alert.FieldSimulated: + values[i] = new(sql.NullBool) + case alert.FieldSourceLatitude, alert.FieldSourceLongitude: + values[i] = new(sql.NullFloat64) + case alert.FieldID, alert.FieldEventsCount, alert.FieldCapacity: + values[i] = new(sql.NullInt64) + case alert.FieldScenario, alert.FieldBucketId, alert.FieldMessage, alert.FieldSourceIp, alert.FieldSourceRange, alert.FieldSourceAsNumber, alert.FieldSourceAsName, alert.FieldSourceCountry, alert.FieldSourceScope, alert.FieldSourceValue, alert.FieldLeakSpeed, alert.FieldScenarioVersion, alert.FieldScenarioHash: + values[i] = new(sql.NullString) + case alert.FieldCreatedAt, alert.FieldUpdatedAt, alert.FieldStartedAt, alert.FieldStoppedAt: + values[i] = new(sql.NullTime) + case alert.ForeignKeys[0]: // machine_alerts + values[i] = new(sql.NullInt64) + default: + return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Alert fields. 
+func (a *Alert) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case alert.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + a.ID = int(value.Int64) + case alert.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + a.CreatedAt = new(time.Time) + *a.CreatedAt = value.Time + } + case alert.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + a.UpdatedAt = new(time.Time) + *a.UpdatedAt = value.Time + } + case alert.FieldScenario: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenario", values[i]) + } else if value.Valid { + a.Scenario = value.String + } + case alert.FieldBucketId: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field bucketId", values[i]) + } else if value.Valid { + a.BucketId = value.String + } + case alert.FieldMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field message", values[i]) + } else if value.Valid { + a.Message = value.String + } + case alert.FieldEventsCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field eventsCount", values[i]) + } else if value.Valid { + a.EventsCount = int32(value.Int64) + } + case alert.FieldStartedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field startedAt", values[i]) + } else if value.Valid { + a.StartedAt = value.Time + } + case alert.FieldStoppedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field stoppedAt", values[i]) + } else if value.Valid { + a.StoppedAt = value.Time + } + case alert.FieldSourceIp: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceIp", values[i]) + } else if value.Valid { + a.SourceIp = value.String + } + case alert.FieldSourceRange: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceRange", values[i]) + } else if value.Valid { + a.SourceRange = value.String + } + case alert.FieldSourceAsNumber: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceAsNumber", values[i]) + } else if value.Valid { + a.SourceAsNumber = value.String + } + case alert.FieldSourceAsName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceAsName", values[i]) + } else if value.Valid { + a.SourceAsName = value.String + } + case alert.FieldSourceCountry: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceCountry", values[i]) + } else if value.Valid { + a.SourceCountry = value.String + } + case alert.FieldSourceLatitude: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field sourceLatitude", values[i]) + } else if value.Valid { + a.SourceLatitude = float32(value.Float64) + } + case alert.FieldSourceLongitude: + 
if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field sourceLongitude", values[i]) + } else if value.Valid { + a.SourceLongitude = float32(value.Float64) + } + case alert.FieldSourceScope: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceScope", values[i]) + } else if value.Valid { + a.SourceScope = value.String + } + case alert.FieldSourceValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sourceValue", values[i]) + } else if value.Valid { + a.SourceValue = value.String + } + case alert.FieldCapacity: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field capacity", values[i]) + } else if value.Valid { + a.Capacity = int32(value.Int64) + } + case alert.FieldLeakSpeed: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field leakSpeed", values[i]) + } else if value.Valid { + a.LeakSpeed = value.String + } + case alert.FieldScenarioVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenarioVersion", values[i]) + } else if value.Valid { + a.ScenarioVersion = value.String + } + case alert.FieldScenarioHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenarioHash", values[i]) + } else if value.Valid { + a.ScenarioHash = value.String + } + case alert.FieldSimulated: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field simulated", values[i]) + } else if value.Valid { + a.Simulated = value.Bool + } + case alert.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field machine_alerts", value) + } else if value.Valid { + a.machine_alerts = new(int) + *a.machine_alerts = int(value.Int64) + } + } + } + return nil +} + +// QueryOwner queries the "owner" edge of the Alert entity. +func (a *Alert) QueryOwner() *MachineQuery { + return (&AlertClient{config: a.config}).QueryOwner(a) +} + +// QueryDecisions queries the "decisions" edge of the Alert entity. +func (a *Alert) QueryDecisions() *DecisionQuery { + return (&AlertClient{config: a.config}).QueryDecisions(a) +} + +// QueryEvents queries the "events" edge of the Alert entity. +func (a *Alert) QueryEvents() *EventQuery { + return (&AlertClient{config: a.config}).QueryEvents(a) +} + +// QueryMetas queries the "metas" edge of the Alert entity. +func (a *Alert) QueryMetas() *MetaQuery { + return (&AlertClient{config: a.config}).QueryMetas(a) +} + +// Update returns a builder for updating this Alert. +// Note that you need to call Alert.Unwrap() before calling this method if this Alert +// was returned from a transaction, and the transaction was committed or rolled back. +func (a *Alert) Update() *AlertUpdateOne { + return (&AlertClient{config: a.config}).UpdateOne(a) +} + +// Unwrap unwraps the Alert entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (a *Alert) Unwrap() *Alert { + _tx, ok := a.config.driver.(*txDriver) + if !ok { + panic("ent: Alert is not a transactional entity") + } + a.config.driver = _tx.drv + return a +} + +// String implements the fmt.Stringer. 
+// String implements the fmt.Stringer.
+func (a *Alert) String() string {
+	var builder strings.Builder
+	builder.WriteString("Alert(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", a.ID))
+	if v := a.CreatedAt; v != nil {
+		builder.WriteString("created_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := a.UpdatedAt; v != nil {
+		builder.WriteString("updated_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	builder.WriteString("scenario=")
+	builder.WriteString(a.Scenario)
+	builder.WriteString(", ")
+	builder.WriteString("bucketId=")
+	builder.WriteString(a.BucketId)
+	builder.WriteString(", ")
+	builder.WriteString("message=")
+	builder.WriteString(a.Message)
+	builder.WriteString(", ")
+	builder.WriteString("eventsCount=")
+	builder.WriteString(fmt.Sprintf("%v", a.EventsCount))
+	builder.WriteString(", ")
+	builder.WriteString("startedAt=")
+	builder.WriteString(a.StartedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("stoppedAt=")
+	builder.WriteString(a.StoppedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("sourceIp=")
+	builder.WriteString(a.SourceIp)
+	builder.WriteString(", ")
+	builder.WriteString("sourceRange=")
+	builder.WriteString(a.SourceRange)
+	builder.WriteString(", ")
+	builder.WriteString("sourceAsNumber=")
+	builder.WriteString(a.SourceAsNumber)
+	builder.WriteString(", ")
+	builder.WriteString("sourceAsName=")
+	builder.WriteString(a.SourceAsName)
+	builder.WriteString(", ")
+	builder.WriteString("sourceCountry=")
+	builder.WriteString(a.SourceCountry)
+	builder.WriteString(", ")
+	builder.WriteString("sourceLatitude=")
+	builder.WriteString(fmt.Sprintf("%v", a.SourceLatitude))
+	builder.WriteString(", ")
+	builder.WriteString("sourceLongitude=")
+	builder.WriteString(fmt.Sprintf("%v", a.SourceLongitude))
+	builder.WriteString(", ")
+	builder.WriteString("sourceScope=")
+	builder.WriteString(a.SourceScope)
+	builder.WriteString(", ")
+	builder.WriteString("sourceValue=")
+	builder.WriteString(a.SourceValue)
+	builder.WriteString(", ")
+	builder.WriteString("capacity=")
+	builder.WriteString(fmt.Sprintf("%v", a.Capacity))
+	builder.WriteString(", ")
+	builder.WriteString("leakSpeed=")
+	builder.WriteString(a.LeakSpeed)
+	builder.WriteString(", ")
+	builder.WriteString("scenarioVersion=")
+	builder.WriteString(a.ScenarioVersion)
+	builder.WriteString(", ")
+	builder.WriteString("scenarioHash=")
+	builder.WriteString(a.ScenarioHash)
+	builder.WriteString(", ")
+	builder.WriteString("simulated=")
+	builder.WriteString(fmt.Sprintf("%v", a.Simulated))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Alerts is a parsable slice of Alert.
+type Alerts []*Alert
+
+func (a Alerts) config(cfg config) {
+	for _i := range a {
+		a[_i].config = cfg
+	}
+}
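+
+// Illustrative usage (a sketch, assuming a loaded *Alert named a and a
+// context ctx; not part of the generated API surface): edges are traversed
+// through the Query* helpers above, e.g.
+//
+//	decisions, err := a.QueryDecisions().All(ctx)
+//	owner, err := a.QueryOwner().Only(ctx)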
diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go
new file mode 100644
index 0000000..0a0b6f2
--- /dev/null
+++ b/pkg/database/ent/alert/alert.go
@@ -0,0 +1,167 @@
+// Code generated by ent, DO NOT EDIT.
+
+package alert
+
+import (
+	"time"
+)
+
+const (
+	// Label holds the string label denoting the alert type in the database.
+	Label = "alert"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldScenario holds the string denoting the scenario field in the database.
+	FieldScenario = "scenario"
+	// FieldBucketId holds the string denoting the bucketid field in the database.
+	FieldBucketId = "bucket_id"
+	// FieldMessage holds the string denoting the message field in the database.
+	FieldMessage = "message"
+	// FieldEventsCount holds the string denoting the eventscount field in the database.
+	FieldEventsCount = "events_count"
+	// FieldStartedAt holds the string denoting the startedat field in the database.
+	FieldStartedAt = "started_at"
+	// FieldStoppedAt holds the string denoting the stoppedat field in the database.
+	FieldStoppedAt = "stopped_at"
+	// FieldSourceIp holds the string denoting the sourceip field in the database.
+	FieldSourceIp = "source_ip"
+	// FieldSourceRange holds the string denoting the sourcerange field in the database.
+	FieldSourceRange = "source_range"
+	// FieldSourceAsNumber holds the string denoting the sourceasnumber field in the database.
+	FieldSourceAsNumber = "source_as_number"
+	// FieldSourceAsName holds the string denoting the sourceasname field in the database.
+	FieldSourceAsName = "source_as_name"
+	// FieldSourceCountry holds the string denoting the sourcecountry field in the database.
+	FieldSourceCountry = "source_country"
+	// FieldSourceLatitude holds the string denoting the sourcelatitude field in the database.
+	FieldSourceLatitude = "source_latitude"
+	// FieldSourceLongitude holds the string denoting the sourcelongitude field in the database.
+	FieldSourceLongitude = "source_longitude"
+	// FieldSourceScope holds the string denoting the sourcescope field in the database.
+	FieldSourceScope = "source_scope"
+	// FieldSourceValue holds the string denoting the sourcevalue field in the database.
+	FieldSourceValue = "source_value"
+	// FieldCapacity holds the string denoting the capacity field in the database.
+	FieldCapacity = "capacity"
+	// FieldLeakSpeed holds the string denoting the leakspeed field in the database.
+	FieldLeakSpeed = "leak_speed"
+	// FieldScenarioVersion holds the string denoting the scenarioversion field in the database.
+	FieldScenarioVersion = "scenario_version"
+	// FieldScenarioHash holds the string denoting the scenariohash field in the database.
+	FieldScenarioHash = "scenario_hash"
+	// FieldSimulated holds the string denoting the simulated field in the database.
+	FieldSimulated = "simulated"
+	// EdgeOwner holds the string denoting the owner edge name in mutations.
+	EdgeOwner = "owner"
+	// EdgeDecisions holds the string denoting the decisions edge name in mutations.
+	EdgeDecisions = "decisions"
+	// EdgeEvents holds the string denoting the events edge name in mutations.
+	EdgeEvents = "events"
+	// EdgeMetas holds the string denoting the metas edge name in mutations.
+	EdgeMetas = "metas"
+	// Table holds the table name of the alert in the database.
+	Table = "alerts"
+	// OwnerTable is the table that holds the owner relation/edge.
+	OwnerTable = "alerts"
+	// OwnerInverseTable is the table name for the Machine entity.
+	// It exists in this package in order to avoid circular dependency with the "machine" package.
+	OwnerInverseTable = "machines"
+	// OwnerColumn is the table column denoting the owner relation/edge.
+	OwnerColumn = "machine_alerts"
+	// DecisionsTable is the table that holds the decisions relation/edge.
+	DecisionsTable = "decisions"
+	// DecisionsInverseTable is the table name for the Decision entity.
+	// It exists in this package in order to avoid circular dependency with the "decision" package.
+	DecisionsInverseTable = "decisions"
+	// DecisionsColumn is the table column denoting the decisions relation/edge.
+	DecisionsColumn = "alert_decisions"
+	// EventsTable is the table that holds the events relation/edge.
+	EventsTable = "events"
+	// EventsInverseTable is the table name for the Event entity.
+	// It exists in this package in order to avoid circular dependency with the "event" package.
+	EventsInverseTable = "events"
+	// EventsColumn is the table column denoting the events relation/edge.
+	EventsColumn = "alert_events"
+	// MetasTable is the table that holds the metas relation/edge.
+	MetasTable = "meta"
+	// MetasInverseTable is the table name for the Meta entity.
+	// It exists in this package in order to avoid circular dependency with the "meta" package.
+	MetasInverseTable = "meta"
+	// MetasColumn is the table column denoting the metas relation/edge.
+	MetasColumn = "alert_metas"
+)
+
+// Columns holds all SQL columns for alert fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+	FieldScenario,
+	FieldBucketId,
+	FieldMessage,
+	FieldEventsCount,
+	FieldStartedAt,
+	FieldStoppedAt,
+	FieldSourceIp,
+	FieldSourceRange,
+	FieldSourceAsNumber,
+	FieldSourceAsName,
+	FieldSourceCountry,
+	FieldSourceLatitude,
+	FieldSourceLongitude,
+	FieldSourceScope,
+	FieldSourceValue,
+	FieldCapacity,
+	FieldLeakSpeed,
+	FieldScenarioVersion,
+	FieldScenarioHash,
+	FieldSimulated,
+}
+
+// ForeignKeys holds the SQL foreign-keys that are owned by the "alerts"
+// table and are not defined as standalone fields in the schema.
+var ForeignKeys = []string{
+	"machine_alerts",
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	for i := range ForeignKeys {
+		if column == ForeignKeys[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// UpdateDefaultCreatedAt holds the default value on update for the "created_at" field.
+	UpdateDefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+	// DefaultBucketId holds the default value on creation for the "bucketId" field.
+	DefaultBucketId string
+	// DefaultMessage holds the default value on creation for the "message" field.
+	DefaultMessage string
+	// DefaultEventsCount holds the default value on creation for the "eventsCount" field.
+	DefaultEventsCount int32
+	// DefaultStartedAt holds the default value on creation for the "startedAt" field.
+	DefaultStartedAt func() time.Time
+	// DefaultStoppedAt holds the default value on creation for the "stoppedAt" field.
+	DefaultStoppedAt func() time.Time
+	// DefaultSimulated holds the default value on creation for the "simulated" field.
+	DefaultSimulated bool
+)
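+
+// Illustrative usage (a sketch, assuming a caller-supplied column name col):
+// ValidColumn is the guard the generated builders use before ordering or
+// selecting on a column, e.g.
+//
+//	if !ValidColumn(col) {
+//		// reject col: neither a field column nor a foreign key of "alerts"
+//	}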
diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go
new file mode 100644
index 0000000..a106fa0
--- /dev/null
+++ b/pkg/database/ent/alert/where.go
@@ -0,0 +1,2473 @@
+// Code generated by ent, DO NOT EDIT.
+
+package alert
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldID), id))
+	})
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldID), id))
+	})
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.In(s.C(FieldID), v...))
+	})
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		v := make([]any, len(ids))
+		for i := range v {
+			v[i] = ids[i]
+		}
+		s.Where(sql.NotIn(s.C(FieldID), v...))
+	})
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldID), id))
+	})
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldID), id))
+	})
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldID), id))
+	})
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldID), id))
+	})
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ.
+func Scenario(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldScenario), v))
+	})
+}
+
+// BucketId applies equality check predicate on the "bucketId" field. It's identical to BucketIdEQ.
+func BucketId(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldBucketId), v))
+	})
+}
+
+// Message applies equality check predicate on the "message" field. It's identical to MessageEQ.
+func Message(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldMessage), v))
+	})
+}
+
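+// Illustrative usage (a sketch, assuming a generated *Client named client and
+// a context ctx): predicates compose with And/Or/Not, defined later in this
+// file, and are handed to Query().Where, e.g.
+//
+//	alerts, err := client.Alert.Query().
+//		Where(And(SimulatedEQ(false), ScenarioContains("ssh"))).
+//		All(ctx)
+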
+// EventsCount applies equality check predicate on the "eventsCount" field. It's identical to EventsCountEQ.
+func EventsCount(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldEventsCount), v))
+	})
+}
+
+// StartedAt applies equality check predicate on the "startedAt" field. It's identical to StartedAtEQ.
+func StartedAt(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldStartedAt), v))
+	})
+}
+
+// StoppedAt applies equality check predicate on the "stoppedAt" field. It's identical to StoppedAtEQ.
+func StoppedAt(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldStoppedAt), v))
+	})
+}
+
+// SourceIp applies equality check predicate on the "sourceIp" field. It's identical to SourceIpEQ.
+func SourceIp(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceRange applies equality check predicate on the "sourceRange" field. It's identical to SourceRangeEQ.
+func SourceRange(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceAsNumber applies equality check predicate on the "sourceAsNumber" field. It's identical to SourceAsNumberEQ.
+func SourceAsNumber(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsName applies equality check predicate on the "sourceAsName" field. It's identical to SourceAsNameEQ.
+func SourceAsName(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceCountry applies equality check predicate on the "sourceCountry" field. It's identical to SourceCountryEQ.
+func SourceCountry(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceLatitude applies equality check predicate on the "sourceLatitude" field. It's identical to SourceLatitudeEQ.
+func SourceLatitude(v float32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceLatitude), v))
+	})
+}
+
+// SourceLongitude applies equality check predicate on the "sourceLongitude" field. It's identical to SourceLongitudeEQ.
+func SourceLongitude(v float32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceLongitude), v))
+	})
+}
+
+// SourceScope applies equality check predicate on the "sourceScope" field. It's identical to SourceScopeEQ.
+func SourceScope(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceScope), v))
+	})
+}
+
+// SourceValue applies equality check predicate on the "sourceValue" field. It's identical to SourceValueEQ.
+func SourceValue(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceValue), v))
+	})
+}
+
+// Capacity applies equality check predicate on the "capacity" field. It's identical to CapacityEQ.
+func Capacity(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCapacity), v))
+	})
+}
+
+// LeakSpeed applies equality check predicate on the "leakSpeed" field. It's identical to LeakSpeedEQ.
+func LeakSpeed(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldLeakSpeed), v))
+	})
+}
+
+// ScenarioVersion applies equality check predicate on the "scenarioVersion" field. It's identical to ScenarioVersionEQ.
+func ScenarioVersion(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldScenarioVersion), v))
+	})
+}
+
+// ScenarioHash applies equality check predicate on the "scenarioHash" field. It's identical to ScenarioHashEQ.
+func ScenarioHash(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldScenarioHash), v))
+	})
+}
+
+// Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ.
+func Simulated(v bool) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSimulated), v))
+	})
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
+	})
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldCreatedAt), v))
+	})
+}
+
+// CreatedAtIsNil applies the IsNil predicate on the "created_at" field.
+func CreatedAtIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldCreatedAt)))
+	})
+}
+
+// CreatedAtNotNil applies the NotNil predicate on the "created_at" field.
+func CreatedAtNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldCreatedAt)))
+	})
+}
+
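+// Illustrative usage (a sketch, assuming client and ctx as in the note above):
+// the created_at range predicates express time windows, e.g. counting alerts
+// from the last 24 hours:
+//
+//	n, err := client.Alert.Query().
+//		Where(CreatedAtGTE(time.Now().Add(-24 * time.Hour))).
+//		Count(ctx)
+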
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldUpdatedAt), v...))
+	})
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
+	})
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
+	})
+}
+
+// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field.
+func UpdatedAtIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldUpdatedAt)))
+	})
+}
+
+// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field.
+func UpdatedAtNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldUpdatedAt)))
+	})
+}
+
+// ScenarioEQ applies the EQ predicate on the "scenario" field.
+func ScenarioEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioNEQ applies the NEQ predicate on the "scenario" field.
+func ScenarioNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioIn applies the In predicate on the "scenario" field.
+func ScenarioIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldScenario), v...))
+	})
+}
+
+// ScenarioNotIn applies the NotIn predicate on the "scenario" field.
+func ScenarioNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldScenario), v...))
+	})
+}
+
+// ScenarioGT applies the GT predicate on the "scenario" field.
+func ScenarioGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioGTE applies the GTE predicate on the "scenario" field.
+func ScenarioGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioLT applies the LT predicate on the "scenario" field.
+func ScenarioLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioLTE applies the LTE predicate on the "scenario" field.
+func ScenarioLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioContains applies the Contains predicate on the "scenario" field.
+func ScenarioContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field.
+func ScenarioHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field.
+func ScenarioHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioEqualFold applies the EqualFold predicate on the "scenario" field.
+func ScenarioEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldScenario), v))
+	})
+}
+
+// ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field.
+func ScenarioContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldScenario), v))
+	})
+}
+
+// BucketIdEQ applies the EQ predicate on the "bucketId" field.
+func BucketIdEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdNEQ applies the NEQ predicate on the "bucketId" field.
+func BucketIdNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdIn applies the In predicate on the "bucketId" field.
+func BucketIdIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldBucketId), v...))
+	})
+}
+
+// BucketIdNotIn applies the NotIn predicate on the "bucketId" field.
+func BucketIdNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldBucketId), v...))
+	})
+}
+
+// BucketIdGT applies the GT predicate on the "bucketId" field.
+func BucketIdGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdGTE applies the GTE predicate on the "bucketId" field.
+func BucketIdGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldBucketId), v))
+	})
+}
+
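+// Illustrative usage (a sketch, assuming client and ctx as in the notes
+// above): the string predicates map onto SQL pattern matching, e.g. selecting
+// hub scenarios by prefix:
+//
+//	alerts, err := client.Alert.Query().
+//		Where(ScenarioHasPrefix("crowdsecurity/")).
+//		All(ctx)
+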
+// BucketIdLT applies the LT predicate on the "bucketId" field.
+func BucketIdLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdLTE applies the LTE predicate on the "bucketId" field.
+func BucketIdLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdContains applies the Contains predicate on the "bucketId" field.
+func BucketIdContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdHasPrefix applies the HasPrefix predicate on the "bucketId" field.
+func BucketIdHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdHasSuffix applies the HasSuffix predicate on the "bucketId" field.
+func BucketIdHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdIsNil applies the IsNil predicate on the "bucketId" field.
+func BucketIdIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldBucketId)))
+	})
+}
+
+// BucketIdNotNil applies the NotNil predicate on the "bucketId" field.
+func BucketIdNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldBucketId)))
+	})
+}
+
+// BucketIdEqualFold applies the EqualFold predicate on the "bucketId" field.
+func BucketIdEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldBucketId), v))
+	})
+}
+
+// BucketIdContainsFold applies the ContainsFold predicate on the "bucketId" field.
+func BucketIdContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldBucketId), v))
+	})
+}
+
+// MessageEQ applies the EQ predicate on the "message" field.
+func MessageEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldMessage), v))
+	})
+}
+
+// MessageNEQ applies the NEQ predicate on the "message" field.
+func MessageNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldMessage), v))
+	})
+}
+
+// MessageIn applies the In predicate on the "message" field.
+func MessageIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldMessage), v...))
+	})
+}
+
+// MessageNotIn applies the NotIn predicate on the "message" field.
+func MessageNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldMessage), v...))
+	})
+}
+
+// MessageGT applies the GT predicate on the "message" field.
+func MessageGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldMessage), v))
+	})
+}
+
+// MessageGTE applies the GTE predicate on the "message" field.
+func MessageGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldMessage), v))
+	})
+}
+
+// MessageLT applies the LT predicate on the "message" field.
+func MessageLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldMessage), v))
+	})
+}
+
+// MessageLTE applies the LTE predicate on the "message" field.
+func MessageLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldMessage), v))
+	})
+}
+
+// MessageContains applies the Contains predicate on the "message" field.
+func MessageContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldMessage), v))
+	})
+}
+
+// MessageHasPrefix applies the HasPrefix predicate on the "message" field.
+func MessageHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldMessage), v))
+	})
+}
+
+// MessageHasSuffix applies the HasSuffix predicate on the "message" field.
+func MessageHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldMessage), v))
+	})
+}
+
+// MessageIsNil applies the IsNil predicate on the "message" field.
+func MessageIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldMessage)))
+	})
+}
+
+// MessageNotNil applies the NotNil predicate on the "message" field.
+func MessageNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldMessage)))
+	})
+}
+
+// MessageEqualFold applies the EqualFold predicate on the "message" field.
+func MessageEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldMessage), v))
+	})
+}
+
+// MessageContainsFold applies the ContainsFold predicate on the "message" field.
+func MessageContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldMessage), v))
+	})
+}
+
+// EventsCountEQ applies the EQ predicate on the "eventsCount" field.
+func EventsCountEQ(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountNEQ applies the NEQ predicate on the "eventsCount" field.
+func EventsCountNEQ(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountIn applies the In predicate on the "eventsCount" field.
+func EventsCountIn(vs ...int32) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldEventsCount), v...))
+	})
+}
+
+// EventsCountNotIn applies the NotIn predicate on the "eventsCount" field.
+func EventsCountNotIn(vs ...int32) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldEventsCount), v...))
+	})
+}
+
+// EventsCountGT applies the GT predicate on the "eventsCount" field.
+func EventsCountGT(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountGTE applies the GTE predicate on the "eventsCount" field.
+func EventsCountGTE(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountLT applies the LT predicate on the "eventsCount" field.
+func EventsCountLT(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountLTE applies the LTE predicate on the "eventsCount" field.
+func EventsCountLTE(v int32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldEventsCount), v))
+	})
+}
+
+// EventsCountIsNil applies the IsNil predicate on the "eventsCount" field.
+func EventsCountIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldEventsCount)))
+	})
+}
+
+// EventsCountNotNil applies the NotNil predicate on the "eventsCount" field.
+func EventsCountNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldEventsCount)))
+	})
+}
+
+// StartedAtEQ applies the EQ predicate on the "startedAt" field.
+func StartedAtEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtNEQ applies the NEQ predicate on the "startedAt" field.
+func StartedAtNEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtIn applies the In predicate on the "startedAt" field.
+func StartedAtIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldStartedAt), v...))
+	})
+}
+
+// StartedAtNotIn applies the NotIn predicate on the "startedAt" field.
+func StartedAtNotIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldStartedAt), v...))
+	})
+}
+
+// StartedAtGT applies the GT predicate on the "startedAt" field.
+func StartedAtGT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtGTE applies the GTE predicate on the "startedAt" field.
+func StartedAtGTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtLT applies the LT predicate on the "startedAt" field.
+func StartedAtLT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtLTE applies the LTE predicate on the "startedAt" field.
+func StartedAtLTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldStartedAt), v))
+	})
+}
+
+// StartedAtIsNil applies the IsNil predicate on the "startedAt" field.
+func StartedAtIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldStartedAt)))
+	})
+}
+
+// StartedAtNotNil applies the NotNil predicate on the "startedAt" field.
+func StartedAtNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldStartedAt)))
+	})
+}
+
+// StoppedAtEQ applies the EQ predicate on the "stoppedAt" field.
+func StoppedAtEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtNEQ applies the NEQ predicate on the "stoppedAt" field.
+func StoppedAtNEQ(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtIn applies the In predicate on the "stoppedAt" field.
+func StoppedAtIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldStoppedAt), v...))
+	})
+}
+
+// StoppedAtNotIn applies the NotIn predicate on the "stoppedAt" field.
+func StoppedAtNotIn(vs ...time.Time) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldStoppedAt), v...))
+	})
+}
+
+// StoppedAtGT applies the GT predicate on the "stoppedAt" field.
+func StoppedAtGT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtGTE applies the GTE predicate on the "stoppedAt" field.
+func StoppedAtGTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtLT applies the LT predicate on the "stoppedAt" field.
+func StoppedAtLT(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtLTE applies the LTE predicate on the "stoppedAt" field.
+func StoppedAtLTE(v time.Time) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldStoppedAt), v))
+	})
+}
+
+// StoppedAtIsNil applies the IsNil predicate on the "stoppedAt" field.
+func StoppedAtIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldStoppedAt)))
+	})
+}
+
+// StoppedAtNotNil applies the NotNil predicate on the "stoppedAt" field.
+func StoppedAtNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldStoppedAt)))
+	})
+}
+
+// SourceIpEQ applies the EQ predicate on the "sourceIp" field.
+func SourceIpEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpNEQ applies the NEQ predicate on the "sourceIp" field.
+func SourceIpNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpIn applies the In predicate on the "sourceIp" field.
+func SourceIpIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSourceIp), v...))
+	})
+}
+
+// SourceIpNotIn applies the NotIn predicate on the "sourceIp" field.
+func SourceIpNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSourceIp), v...))
+	})
+}
+
+// SourceIpGT applies the GT predicate on the "sourceIp" field.
+func SourceIpGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpGTE applies the GTE predicate on the "sourceIp" field.
+func SourceIpGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpLT applies the LT predicate on the "sourceIp" field.
+func SourceIpLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpLTE applies the LTE predicate on the "sourceIp" field.
+func SourceIpLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpContains applies the Contains predicate on the "sourceIp" field.
+func SourceIpContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpHasPrefix applies the HasPrefix predicate on the "sourceIp" field.
+func SourceIpHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpHasSuffix applies the HasSuffix predicate on the "sourceIp" field.
+func SourceIpHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpIsNil applies the IsNil predicate on the "sourceIp" field.
+func SourceIpIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSourceIp)))
+	})
+}
+
+// SourceIpNotNil applies the NotNil predicate on the "sourceIp" field.
+func SourceIpNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSourceIp)))
+	})
+}
+
+// SourceIpEqualFold applies the EqualFold predicate on the "sourceIp" field.
+func SourceIpEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceIpContainsFold applies the ContainsFold predicate on the "sourceIp" field.
+func SourceIpContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSourceIp), v))
+	})
+}
+
+// SourceRangeEQ applies the EQ predicate on the "sourceRange" field.
+func SourceRangeEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeNEQ applies the NEQ predicate on the "sourceRange" field.
+func SourceRangeNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeIn applies the In predicate on the "sourceRange" field.
+func SourceRangeIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSourceRange), v...))
+	})
+}
+
+// SourceRangeNotIn applies the NotIn predicate on the "sourceRange" field.
+func SourceRangeNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSourceRange), v...))
+	})
+}
+
+// SourceRangeGT applies the GT predicate on the "sourceRange" field.
+func SourceRangeGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSourceRange), v))
+	})
+}
+
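+// Illustrative usage (a sketch, assuming client and ctx as in the notes
+// above): IsNil/NotNil test for SQL NULL rather than the empty string, e.g.
+// alerts recorded without a source IP:
+//
+//	alerts, err := client.Alert.Query().Where(SourceIpIsNil()).All(ctx)
+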
+// SourceRangeGTE applies the GTE predicate on the "sourceRange" field.
+func SourceRangeGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeLT applies the LT predicate on the "sourceRange" field.
+func SourceRangeLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeLTE applies the LTE predicate on the "sourceRange" field.
+func SourceRangeLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeContains applies the Contains predicate on the "sourceRange" field.
+func SourceRangeContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeHasPrefix applies the HasPrefix predicate on the "sourceRange" field.
+func SourceRangeHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeHasSuffix applies the HasSuffix predicate on the "sourceRange" field.
+func SourceRangeHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeIsNil applies the IsNil predicate on the "sourceRange" field.
+func SourceRangeIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSourceRange)))
+	})
+}
+
+// SourceRangeNotNil applies the NotNil predicate on the "sourceRange" field.
+func SourceRangeNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSourceRange)))
+	})
+}
+
+// SourceRangeEqualFold applies the EqualFold predicate on the "sourceRange" field.
+func SourceRangeEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceRangeContainsFold applies the ContainsFold predicate on the "sourceRange" field.
+func SourceRangeContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSourceRange), v))
+	})
+}
+
+// SourceAsNumberEQ applies the EQ predicate on the "sourceAsNumber" field.
+func SourceAsNumberEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberNEQ applies the NEQ predicate on the "sourceAsNumber" field.
+func SourceAsNumberNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberIn applies the In predicate on the "sourceAsNumber" field.
+func SourceAsNumberIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSourceAsNumber), v...))
+	})
+}
+
+// SourceAsNumberNotIn applies the NotIn predicate on the "sourceAsNumber" field.
+func SourceAsNumberNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSourceAsNumber), v...))
+	})
+}
+
+// SourceAsNumberGT applies the GT predicate on the "sourceAsNumber" field.
+func SourceAsNumberGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberGTE applies the GTE predicate on the "sourceAsNumber" field.
+func SourceAsNumberGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberLT applies the LT predicate on the "sourceAsNumber" field.
+func SourceAsNumberLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberLTE applies the LTE predicate on the "sourceAsNumber" field.
+func SourceAsNumberLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberContains applies the Contains predicate on the "sourceAsNumber" field.
+func SourceAsNumberContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberHasPrefix applies the HasPrefix predicate on the "sourceAsNumber" field.
+func SourceAsNumberHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberHasSuffix applies the HasSuffix predicate on the "sourceAsNumber" field.
+func SourceAsNumberHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberIsNil applies the IsNil predicate on the "sourceAsNumber" field.
+func SourceAsNumberIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSourceAsNumber)))
+	})
+}
+
+// SourceAsNumberNotNil applies the NotNil predicate on the "sourceAsNumber" field.
+func SourceAsNumberNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSourceAsNumber)))
+	})
+}
+
+// SourceAsNumberEqualFold applies the EqualFold predicate on the "sourceAsNumber" field.
+func SourceAsNumberEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNumberContainsFold applies the ContainsFold predicate on the "sourceAsNumber" field.
+func SourceAsNumberContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSourceAsNumber), v))
+	})
+}
+
+// SourceAsNameEQ applies the EQ predicate on the "sourceAsName" field.
+func SourceAsNameEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameNEQ applies the NEQ predicate on the "sourceAsName" field.
+func SourceAsNameNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameIn applies the In predicate on the "sourceAsName" field.
+func SourceAsNameIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSourceAsName), v...))
+	})
+}
+
+// SourceAsNameNotIn applies the NotIn predicate on the "sourceAsName" field.
+func SourceAsNameNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSourceAsName), v...))
+	})
+}
+
+// SourceAsNameGT applies the GT predicate on the "sourceAsName" field.
+func SourceAsNameGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameGTE applies the GTE predicate on the "sourceAsName" field.
+func SourceAsNameGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameLT applies the LT predicate on the "sourceAsName" field.
+func SourceAsNameLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameLTE applies the LTE predicate on the "sourceAsName" field.
+func SourceAsNameLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameContains applies the Contains predicate on the "sourceAsName" field.
+func SourceAsNameContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameHasPrefix applies the HasPrefix predicate on the "sourceAsName" field.
+func SourceAsNameHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameHasSuffix applies the HasSuffix predicate on the "sourceAsName" field.
+func SourceAsNameHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameIsNil applies the IsNil predicate on the "sourceAsName" field.
+func SourceAsNameIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSourceAsName)))
+	})
+}
+
+// SourceAsNameNotNil applies the NotNil predicate on the "sourceAsName" field.
+func SourceAsNameNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSourceAsName)))
+	})
+}
+
+// SourceAsNameEqualFold applies the EqualFold predicate on the "sourceAsName" field.
+func SourceAsNameEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceAsNameContainsFold applies the ContainsFold predicate on the "sourceAsName" field.
+func SourceAsNameContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSourceAsName), v))
+	})
+}
+
+// SourceCountryEQ applies the EQ predicate on the "sourceCountry" field.
+func SourceCountryEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryNEQ applies the NEQ predicate on the "sourceCountry" field.
+func SourceCountryNEQ(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryIn applies the In predicate on the "sourceCountry" field.
+func SourceCountryIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.In(s.C(FieldSourceCountry), v...))
+	})
+}
+
+// SourceCountryNotIn applies the NotIn predicate on the "sourceCountry" field.
+func SourceCountryNotIn(vs ...string) predicate.Alert {
+	v := make([]any, len(vs))
+	for i := range v {
+		v[i] = vs[i]
+	}
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotIn(s.C(FieldSourceCountry), v...))
+	})
+}
+
+// SourceCountryGT applies the GT predicate on the "sourceCountry" field.
+func SourceCountryGT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GT(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryGTE applies the GTE predicate on the "sourceCountry" field.
+func SourceCountryGTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.GTE(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryLT applies the LT predicate on the "sourceCountry" field.
+func SourceCountryLT(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LT(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryLTE applies the LTE predicate on the "sourceCountry" field.
+func SourceCountryLTE(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.LTE(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryContains applies the Contains predicate on the "sourceCountry" field.
+func SourceCountryContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryHasPrefix applies the HasPrefix predicate on the "sourceCountry" field.
+func SourceCountryHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryHasSuffix applies the HasSuffix predicate on the "sourceCountry" field.
+func SourceCountryHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryIsNil applies the IsNil predicate on the "sourceCountry" field.
+func SourceCountryIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldSourceCountry)))
+	})
+}
+
+// SourceCountryNotNil applies the NotNil predicate on the "sourceCountry" field.
+func SourceCountryNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldSourceCountry)))
+	})
+}
+
+// SourceCountryEqualFold applies the EqualFold predicate on the "sourceCountry" field.
+func SourceCountryEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceCountryContainsFold applies the ContainsFold predicate on the "sourceCountry" field.
+func SourceCountryContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldSourceCountry), v))
+	})
+}
+
+// SourceLatitudeEQ applies the EQ predicate on the "sourceLatitude" field.
+func SourceLatitudeEQ(v float32) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSourceLatitude), v))
+	})
+}
+
+// SourceLatitudeNEQ applies the NEQ predicate on the "sourceLatitude" field.
+func SourceLatitudeNEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeIn applies the In predicate on the "sourceLatitude" field. +func SourceLatitudeIn(vs ...float32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldSourceLatitude), v...)) + }) +} + +// SourceLatitudeNotIn applies the NotIn predicate on the "sourceLatitude" field. +func SourceLatitudeNotIn(vs ...float32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldSourceLatitude), v...)) + }) +} + +// SourceLatitudeGT applies the GT predicate on the "sourceLatitude" field. +func SourceLatitudeGT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeGTE applies the GTE predicate on the "sourceLatitude" field. +func SourceLatitudeGTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeLT applies the LT predicate on the "sourceLatitude" field. +func SourceLatitudeLT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeLTE applies the LTE predicate on the "sourceLatitude" field. +func SourceLatitudeLTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceLatitude), v)) + }) +} + +// SourceLatitudeIsNil applies the IsNil predicate on the "sourceLatitude" field. +func SourceLatitudeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceLatitude))) + }) +} + +// SourceLatitudeNotNil applies the NotNil predicate on the "sourceLatitude" field. +func SourceLatitudeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceLatitude))) + }) +} + +// SourceLongitudeEQ applies the EQ predicate on the "sourceLongitude" field. +func SourceLongitudeEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeNEQ applies the NEQ predicate on the "sourceLongitude" field. +func SourceLongitudeNEQ(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeIn applies the In predicate on the "sourceLongitude" field. +func SourceLongitudeIn(vs ...float32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldSourceLongitude), v...)) + }) +} + +// SourceLongitudeNotIn applies the NotIn predicate on the "sourceLongitude" field. +func SourceLongitudeNotIn(vs ...float32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldSourceLongitude), v...)) + }) +} + +// SourceLongitudeGT applies the GT predicate on the "sourceLongitude" field. 
+func SourceLongitudeGT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeGTE applies the GTE predicate on the "sourceLongitude" field. +func SourceLongitudeGTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeLT applies the LT predicate on the "sourceLongitude" field. +func SourceLongitudeLT(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeLTE applies the LTE predicate on the "sourceLongitude" field. +func SourceLongitudeLTE(v float32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceLongitude), v)) + }) +} + +// SourceLongitudeIsNil applies the IsNil predicate on the "sourceLongitude" field. +func SourceLongitudeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceLongitude))) + }) +} + +// SourceLongitudeNotNil applies the NotNil predicate on the "sourceLongitude" field. +func SourceLongitudeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceLongitude))) + }) +} + +// SourceScopeEQ applies the EQ predicate on the "sourceScope" field. +func SourceScopeEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeNEQ applies the NEQ predicate on the "sourceScope" field. +func SourceScopeNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeIn applies the In predicate on the "sourceScope" field. +func SourceScopeIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldSourceScope), v...)) + }) +} + +// SourceScopeNotIn applies the NotIn predicate on the "sourceScope" field. +func SourceScopeNotIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldSourceScope), v...)) + }) +} + +// SourceScopeGT applies the GT predicate on the "sourceScope" field. +func SourceScopeGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeGTE applies the GTE predicate on the "sourceScope" field. +func SourceScopeGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeLT applies the LT predicate on the "sourceScope" field. +func SourceScopeLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeLTE applies the LTE predicate on the "sourceScope" field. +func SourceScopeLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeContains applies the Contains predicate on the "sourceScope" field. 
+func SourceScopeContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeHasPrefix applies the HasPrefix predicate on the "sourceScope" field. +func SourceScopeHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeHasSuffix applies the HasSuffix predicate on the "sourceScope" field. +func SourceScopeHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeIsNil applies the IsNil predicate on the "sourceScope" field. +func SourceScopeIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceScope))) + }) +} + +// SourceScopeNotNil applies the NotNil predicate on the "sourceScope" field. +func SourceScopeNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceScope))) + }) +} + +// SourceScopeEqualFold applies the EqualFold predicate on the "sourceScope" field. +func SourceScopeEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceScope), v)) + }) +} + +// SourceScopeContainsFold applies the ContainsFold predicate on the "sourceScope" field. +func SourceScopeContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceScope), v)) + }) +} + +// SourceValueEQ applies the EQ predicate on the "sourceValue" field. +func SourceValueEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueNEQ applies the NEQ predicate on the "sourceValue" field. +func SourceValueNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueIn applies the In predicate on the "sourceValue" field. +func SourceValueIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldSourceValue), v...)) + }) +} + +// SourceValueNotIn applies the NotIn predicate on the "sourceValue" field. +func SourceValueNotIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldSourceValue), v...)) + }) +} + +// SourceValueGT applies the GT predicate on the "sourceValue" field. +func SourceValueGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueGTE applies the GTE predicate on the "sourceValue" field. +func SourceValueGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueLT applies the LT predicate on the "sourceValue" field. +func SourceValueLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueLTE applies the LTE predicate on the "sourceValue" field. 
+func SourceValueLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueContains applies the Contains predicate on the "sourceValue" field. +func SourceValueContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueHasPrefix applies the HasPrefix predicate on the "sourceValue" field. +func SourceValueHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueHasSuffix applies the HasSuffix predicate on the "sourceValue" field. +func SourceValueHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueIsNil applies the IsNil predicate on the "sourceValue" field. +func SourceValueIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldSourceValue))) + }) +} + +// SourceValueNotNil applies the NotNil predicate on the "sourceValue" field. +func SourceValueNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldSourceValue))) + }) +} + +// SourceValueEqualFold applies the EqualFold predicate on the "sourceValue" field. +func SourceValueEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSourceValue), v)) + }) +} + +// SourceValueContainsFold applies the ContainsFold predicate on the "sourceValue" field. +func SourceValueContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSourceValue), v)) + }) +} + +// CapacityEQ applies the EQ predicate on the "capacity" field. +func CapacityEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCapacity), v)) + }) +} + +// CapacityNEQ applies the NEQ predicate on the "capacity" field. +func CapacityNEQ(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCapacity), v)) + }) +} + +// CapacityIn applies the In predicate on the "capacity" field. +func CapacityIn(vs ...int32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCapacity), v...)) + }) +} + +// CapacityNotIn applies the NotIn predicate on the "capacity" field. +func CapacityNotIn(vs ...int32) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCapacity), v...)) + }) +} + +// CapacityGT applies the GT predicate on the "capacity" field. +func CapacityGT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCapacity), v)) + }) +} + +// CapacityGTE applies the GTE predicate on the "capacity" field. +func CapacityGTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCapacity), v)) + }) +} + +// CapacityLT applies the LT predicate on the "capacity" field. 
+func CapacityLT(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCapacity), v)) + }) +} + +// CapacityLTE applies the LTE predicate on the "capacity" field. +func CapacityLTE(v int32) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCapacity), v)) + }) +} + +// CapacityIsNil applies the IsNil predicate on the "capacity" field. +func CapacityIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCapacity))) + }) +} + +// CapacityNotNil applies the NotNil predicate on the "capacity" field. +func CapacityNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCapacity))) + }) +} + +// LeakSpeedEQ applies the EQ predicate on the "leakSpeed" field. +func LeakSpeedEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedNEQ applies the NEQ predicate on the "leakSpeed" field. +func LeakSpeedNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedIn applies the In predicate on the "leakSpeed" field. +func LeakSpeedIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldLeakSpeed), v...)) + }) +} + +// LeakSpeedNotIn applies the NotIn predicate on the "leakSpeed" field. +func LeakSpeedNotIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldLeakSpeed), v...)) + }) +} + +// LeakSpeedGT applies the GT predicate on the "leakSpeed" field. +func LeakSpeedGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedGTE applies the GTE predicate on the "leakSpeed" field. +func LeakSpeedGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedLT applies the LT predicate on the "leakSpeed" field. +func LeakSpeedLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedLTE applies the LTE predicate on the "leakSpeed" field. +func LeakSpeedLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedContains applies the Contains predicate on the "leakSpeed" field. +func LeakSpeedContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedHasPrefix applies the HasPrefix predicate on the "leakSpeed" field. +func LeakSpeedHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedHasSuffix applies the HasSuffix predicate on the "leakSpeed" field. +func LeakSpeedHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedIsNil applies the IsNil predicate on the "leakSpeed" field. 
+func LeakSpeedIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldLeakSpeed))) + }) +} + +// LeakSpeedNotNil applies the NotNil predicate on the "leakSpeed" field. +func LeakSpeedNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldLeakSpeed))) + }) +} + +// LeakSpeedEqualFold applies the EqualFold predicate on the "leakSpeed" field. +func LeakSpeedEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldLeakSpeed), v)) + }) +} + +// LeakSpeedContainsFold applies the ContainsFold predicate on the "leakSpeed" field. +func LeakSpeedContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldLeakSpeed), v)) + }) +} + +// ScenarioVersionEQ applies the EQ predicate on the "scenarioVersion" field. +func ScenarioVersionEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionNEQ applies the NEQ predicate on the "scenarioVersion" field. +func ScenarioVersionNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionIn applies the In predicate on the "scenarioVersion" field. +func ScenarioVersionIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldScenarioVersion), v...)) + }) +} + +// ScenarioVersionNotIn applies the NotIn predicate on the "scenarioVersion" field. +func ScenarioVersionNotIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldScenarioVersion), v...)) + }) +} + +// ScenarioVersionGT applies the GT predicate on the "scenarioVersion" field. +func ScenarioVersionGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionGTE applies the GTE predicate on the "scenarioVersion" field. +func ScenarioVersionGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionLT applies the LT predicate on the "scenarioVersion" field. +func ScenarioVersionLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionLTE applies the LTE predicate on the "scenarioVersion" field. +func ScenarioVersionLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionContains applies the Contains predicate on the "scenarioVersion" field. +func ScenarioVersionContains(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionHasPrefix applies the HasPrefix predicate on the "scenarioVersion" field. 
+func ScenarioVersionHasPrefix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionHasSuffix applies the HasSuffix predicate on the "scenarioVersion" field. +func ScenarioVersionHasSuffix(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionIsNil applies the IsNil predicate on the "scenarioVersion" field. +func ScenarioVersionIsNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldScenarioVersion))) + }) +} + +// ScenarioVersionNotNil applies the NotNil predicate on the "scenarioVersion" field. +func ScenarioVersionNotNil() predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldScenarioVersion))) + }) +} + +// ScenarioVersionEqualFold applies the EqualFold predicate on the "scenarioVersion" field. +func ScenarioVersionEqualFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioVersionContainsFold applies the ContainsFold predicate on the "scenarioVersion" field. +func ScenarioVersionContainsFold(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenarioVersion), v)) + }) +} + +// ScenarioHashEQ applies the EQ predicate on the "scenarioHash" field. +func ScenarioHashEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashNEQ applies the NEQ predicate on the "scenarioHash" field. +func ScenarioHashNEQ(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashIn applies the In predicate on the "scenarioHash" field. +func ScenarioHashIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldScenarioHash), v...)) + }) +} + +// ScenarioHashNotIn applies the NotIn predicate on the "scenarioHash" field. +func ScenarioHashNotIn(vs ...string) predicate.Alert { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldScenarioHash), v...)) + }) +} + +// ScenarioHashGT applies the GT predicate on the "scenarioHash" field. +func ScenarioHashGT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashGTE applies the GTE predicate on the "scenarioHash" field. +func ScenarioHashGTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashLT applies the LT predicate on the "scenarioHash" field. +func ScenarioHashLT(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashLTE applies the LTE predicate on the "scenarioHash" field. +func ScenarioHashLTE(v string) predicate.Alert { + return predicate.Alert(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarioHash), v)) + }) +} + +// ScenarioHashContains applies the Contains predicate on the "scenarioHash" field. 
+func ScenarioHashContains(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.Contains(s.C(FieldScenarioHash), v))
+	})
+}
+
+// ScenarioHashHasPrefix applies the HasPrefix predicate on the "scenarioHash" field.
+func ScenarioHashHasPrefix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasPrefix(s.C(FieldScenarioHash), v))
+	})
+}
+
+// ScenarioHashHasSuffix applies the HasSuffix predicate on the "scenarioHash" field.
+func ScenarioHashHasSuffix(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.HasSuffix(s.C(FieldScenarioHash), v))
+	})
+}
+
+// ScenarioHashIsNil applies the IsNil predicate on the "scenarioHash" field.
+func ScenarioHashIsNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.IsNull(s.C(FieldScenarioHash)))
+	})
+}
+
+// ScenarioHashNotNil applies the NotNil predicate on the "scenarioHash" field.
+func ScenarioHashNotNil() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NotNull(s.C(FieldScenarioHash)))
+	})
+}
+
+// ScenarioHashEqualFold applies the EqualFold predicate on the "scenarioHash" field.
+func ScenarioHashEqualFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EqualFold(s.C(FieldScenarioHash), v))
+	})
+}
+
+// ScenarioHashContainsFold applies the ContainsFold predicate on the "scenarioHash" field.
+func ScenarioHashContainsFold(v string) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.ContainsFold(s.C(FieldScenarioHash), v))
+	})
+}
+
+// SimulatedEQ applies the EQ predicate on the "simulated" field.
+func SimulatedEQ(v bool) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.EQ(s.C(FieldSimulated), v))
+	})
+}
+
+// SimulatedNEQ applies the NEQ predicate on the "simulated" field.
+func SimulatedNEQ(v bool) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s.Where(sql.NEQ(s.C(FieldSimulated), v))
+	})
+}
+
+// HasOwner applies the HasEdge predicate on the "owner" edge.
+func HasOwner() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(OwnerTable, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasOwnerWith applies the HasEdge predicate on the "owner" edge with given conditions (other predicates).
+func HasOwnerWith(preds ...predicate.Machine) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(OwnerInverseTable, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
+		)
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasDecisions applies the HasEdge predicate on the "decisions" edge.
+func HasDecisions() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(DecisionsTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasDecisionsWith applies the HasEdge predicate on the "decisions" edge with given conditions (other predicates).
+func HasDecisionsWith(preds ...predicate.Decision) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(DecisionsInverseTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn),
+		)
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasEvents applies the HasEdge predicate on the "events" edge.
+func HasEvents() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(EventsTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasEventsWith applies the HasEdge predicate on the "events" edge with given conditions (other predicates).
+func HasEventsWith(preds ...predicate.Event) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(EventsInverseTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn),
+		)
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasMetas applies the HasEdge predicate on the "metas" edge.
+func HasMetas() predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(MetasTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasMetasWith applies the HasEdge predicate on the "metas" edge with given conditions (other predicates).
+func HasMetasWith(preds ...predicate.Meta) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.To(MetasInverseTable, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn),
+		)
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Alert) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for _, p := range predicates {
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Alert) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		s1 := s.Clone().SetP(nil)
+		for i, p := range predicates {
+			if i > 0 {
+				s1.Or()
+			}
+			p(s1)
+		}
+		s.Where(s1.P())
+	})
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Alert) predicate.Alert {
+	return predicate.Alert(func(s *sql.Selector) {
+		p(s.Not())
+	})
+}
diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go
new file mode 100644
index 0000000..6d08e8d
--- /dev/null
+++ b/pkg/database/ent/alert_create.go
@@ -0,0 +1,875 @@
+// Code generated by ent, DO NOT EDIT.
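+
+// Illustrative usage sketch for the create builder defined below, assuming an
+// ent *Client named "client" and a context.Context named "ctx" (neither is
+// defined in this file; the scenario string is an example value):
+//
+//	a, err := client.Alert.Create().
+//		SetScenario("crowdsecurity/ssh-bf").
+//		SetSimulated(false).
+//		Save(ctx)
+//	if err != nil {
+//		// handle the creation error
+//	}
+//	_ = a // *ent.Alert with generated defaults (created_at, ...) applied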
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
+)
+
+// AlertCreate is the builder for creating an Alert entity.
+type AlertCreate struct {
+	config
+	mutation *AlertMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (ac *AlertCreate) SetCreatedAt(t time.Time) *AlertCreate {
+	ac.mutation.SetCreatedAt(t)
+	return ac
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableCreatedAt(t *time.Time) *AlertCreate {
+	if t != nil {
+		ac.SetCreatedAt(*t)
+	}
+	return ac
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (ac *AlertCreate) SetUpdatedAt(t time.Time) *AlertCreate {
+	ac.mutation.SetUpdatedAt(t)
+	return ac
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableUpdatedAt(t *time.Time) *AlertCreate {
+	if t != nil {
+		ac.SetUpdatedAt(*t)
+	}
+	return ac
+}
+
+// SetScenario sets the "scenario" field.
+func (ac *AlertCreate) SetScenario(s string) *AlertCreate {
+	ac.mutation.SetScenario(s)
+	return ac
+}
+
+// SetBucketId sets the "bucketId" field.
+func (ac *AlertCreate) SetBucketId(s string) *AlertCreate {
+	ac.mutation.SetBucketId(s)
+	return ac
+}
+
+// SetNillableBucketId sets the "bucketId" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableBucketId(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetBucketId(*s)
+	}
+	return ac
+}
+
+// SetMessage sets the "message" field.
+func (ac *AlertCreate) SetMessage(s string) *AlertCreate {
+	ac.mutation.SetMessage(s)
+	return ac
+}
+
+// SetNillableMessage sets the "message" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableMessage(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetMessage(*s)
+	}
+	return ac
+}
+
+// SetEventsCount sets the "eventsCount" field.
+func (ac *AlertCreate) SetEventsCount(i int32) *AlertCreate {
+	ac.mutation.SetEventsCount(i)
+	return ac
+}
+
+// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableEventsCount(i *int32) *AlertCreate {
+	if i != nil {
+		ac.SetEventsCount(*i)
+	}
+	return ac
+}
+
+// SetStartedAt sets the "startedAt" field.
+func (ac *AlertCreate) SetStartedAt(t time.Time) *AlertCreate {
+	ac.mutation.SetStartedAt(t)
+	return ac
+}
+
+// SetNillableStartedAt sets the "startedAt" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableStartedAt(t *time.Time) *AlertCreate {
+	if t != nil {
+		ac.SetStartedAt(*t)
+	}
+	return ac
+}
+
+// SetStoppedAt sets the "stoppedAt" field.
+func (ac *AlertCreate) SetStoppedAt(t time.Time) *AlertCreate {
+	ac.mutation.SetStoppedAt(t)
+	return ac
+}
+
+// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableStoppedAt(t *time.Time) *AlertCreate {
+	if t != nil {
+		ac.SetStoppedAt(*t)
+	}
+	return ac
+}
+
+// SetSourceIp sets the "sourceIp" field.
+func (ac *AlertCreate) SetSourceIp(s string) *AlertCreate {
+	ac.mutation.SetSourceIp(s)
+	return ac
+}
+
+// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceIp(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceIp(*s)
+	}
+	return ac
+}
+
+// SetSourceRange sets the "sourceRange" field.
+func (ac *AlertCreate) SetSourceRange(s string) *AlertCreate {
+	ac.mutation.SetSourceRange(s)
+	return ac
+}
+
+// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceRange(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceRange(*s)
+	}
+	return ac
+}
+
+// SetSourceAsNumber sets the "sourceAsNumber" field.
+func (ac *AlertCreate) SetSourceAsNumber(s string) *AlertCreate {
+	ac.mutation.SetSourceAsNumber(s)
+	return ac
+}
+
+// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceAsNumber(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceAsNumber(*s)
+	}
+	return ac
+}
+
+// SetSourceAsName sets the "sourceAsName" field.
+func (ac *AlertCreate) SetSourceAsName(s string) *AlertCreate {
+	ac.mutation.SetSourceAsName(s)
+	return ac
+}
+
+// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceAsName(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceAsName(*s)
+	}
+	return ac
+}
+
+// SetSourceCountry sets the "sourceCountry" field.
+func (ac *AlertCreate) SetSourceCountry(s string) *AlertCreate {
+	ac.mutation.SetSourceCountry(s)
+	return ac
+}
+
+// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceCountry(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceCountry(*s)
+	}
+	return ac
+}
+
+// SetSourceLatitude sets the "sourceLatitude" field.
+func (ac *AlertCreate) SetSourceLatitude(f float32) *AlertCreate {
+	ac.mutation.SetSourceLatitude(f)
+	return ac
+}
+
+// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceLatitude(f *float32) *AlertCreate {
+	if f != nil {
+		ac.SetSourceLatitude(*f)
+	}
+	return ac
+}
+
+// SetSourceLongitude sets the "sourceLongitude" field.
+func (ac *AlertCreate) SetSourceLongitude(f float32) *AlertCreate {
+	ac.mutation.SetSourceLongitude(f)
+	return ac
+}
+
+// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceLongitude(f *float32) *AlertCreate {
+	if f != nil {
+		ac.SetSourceLongitude(*f)
+	}
+	return ac
+}
+
+// SetSourceScope sets the "sourceScope" field.
+func (ac *AlertCreate) SetSourceScope(s string) *AlertCreate {
+	ac.mutation.SetSourceScope(s)
+	return ac
+}
+
+// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceScope(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceScope(*s)
+	}
+	return ac
+}
+
+// SetSourceValue sets the "sourceValue" field.
+func (ac *AlertCreate) SetSourceValue(s string) *AlertCreate {
+	ac.mutation.SetSourceValue(s)
+	return ac
+}
+
+// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSourceValue(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetSourceValue(*s)
+	}
+	return ac
+}
+
+// SetCapacity sets the "capacity" field.
+func (ac *AlertCreate) SetCapacity(i int32) *AlertCreate {
+	ac.mutation.SetCapacity(i)
+	return ac
+}
+
+// SetNillableCapacity sets the "capacity" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableCapacity(i *int32) *AlertCreate {
+	if i != nil {
+		ac.SetCapacity(*i)
+	}
+	return ac
+}
+
+// SetLeakSpeed sets the "leakSpeed" field.
+func (ac *AlertCreate) SetLeakSpeed(s string) *AlertCreate {
+	ac.mutation.SetLeakSpeed(s)
+	return ac
+}
+
+// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableLeakSpeed(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetLeakSpeed(*s)
+	}
+	return ac
+}
+
+// SetScenarioVersion sets the "scenarioVersion" field.
+func (ac *AlertCreate) SetScenarioVersion(s string) *AlertCreate {
+	ac.mutation.SetScenarioVersion(s)
+	return ac
+}
+
+// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableScenarioVersion(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetScenarioVersion(*s)
+	}
+	return ac
+}
+
+// SetScenarioHash sets the "scenarioHash" field.
+func (ac *AlertCreate) SetScenarioHash(s string) *AlertCreate {
+	ac.mutation.SetScenarioHash(s)
+	return ac
+}
+
+// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableScenarioHash(s *string) *AlertCreate {
+	if s != nil {
+		ac.SetScenarioHash(*s)
+	}
+	return ac
+}
+
+// SetSimulated sets the "simulated" field.
+func (ac *AlertCreate) SetSimulated(b bool) *AlertCreate {
+	ac.mutation.SetSimulated(b)
+	return ac
+}
+
+// SetNillableSimulated sets the "simulated" field if the given value is not nil.
+func (ac *AlertCreate) SetNillableSimulated(b *bool) *AlertCreate {
+	if b != nil {
+		ac.SetSimulated(*b)
+	}
+	return ac
+}
+
+// SetOwnerID sets the "owner" edge to the Machine entity by ID.
+func (ac *AlertCreate) SetOwnerID(id int) *AlertCreate {
+	ac.mutation.SetOwnerID(id)
+	return ac
+}
+
+// SetNillableOwnerID sets the "owner" edge to the Machine entity by ID if the given value is not nil.
+func (ac *AlertCreate) SetNillableOwnerID(id *int) *AlertCreate {
+	if id != nil {
+		ac = ac.SetOwnerID(*id)
+	}
+	return ac
+}
+
+// SetOwner sets the "owner" edge to the Machine entity.
+func (ac *AlertCreate) SetOwner(m *Machine) *AlertCreate {
+	return ac.SetOwnerID(m.ID)
+}
+
+// AddDecisionIDs adds the "decisions" edge to the Decision entity by IDs.
+func (ac *AlertCreate) AddDecisionIDs(ids ...int) *AlertCreate {
+	ac.mutation.AddDecisionIDs(ids...)
+	return ac
+}
+
+// AddDecisions adds the "decisions" edges to the Decision entity.
+func (ac *AlertCreate) AddDecisions(d ...*Decision) *AlertCreate {
+	ids := make([]int, len(d))
+	for i := range d {
+		ids[i] = d[i].ID
+	}
+	return ac.AddDecisionIDs(ids...)
+}
+
+// AddEventIDs adds the "events" edge to the Event entity by IDs.
+func (ac *AlertCreate) AddEventIDs(ids ...int) *AlertCreate {
+	ac.mutation.AddEventIDs(ids...)
+	return ac
+}
+
+// AddEvents adds the "events" edges to the Event entity.
+func (ac *AlertCreate) AddEvents(e ...*Event) *AlertCreate {
+	ids := make([]int, len(e))
+	for i := range e {
+		ids[i] = e[i].ID
+	}
+	return ac.AddEventIDs(ids...)
+}
+
+// AddMetaIDs adds the "metas" edge to the Meta entity by IDs.
+func (ac *AlertCreate) AddMetaIDs(ids ...int) *AlertCreate {
+	ac.mutation.AddMetaIDs(ids...)
+	return ac
+}
+
+// AddMetas adds the "metas" edges to the Meta entity.
+func (ac *AlertCreate) AddMetas(m ...*Meta) *AlertCreate {
+	ids := make([]int, len(m))
+	for i := range m {
+		ids[i] = m[i].ID
+	}
+	return ac.AddMetaIDs(ids...)
+}
+
+// Mutation returns the AlertMutation object of the builder.
+func (ac *AlertCreate) Mutation() *AlertMutation {
+	return ac.mutation
+}
+
+// Save creates the Alert in the database.
+func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) {
+	var (
+		err  error
+		node *Alert
+	)
+	ac.defaults()
+	if len(ac.hooks) == 0 {
+		if err = ac.check(); err != nil {
+			return nil, err
+		}
+		node, err = ac.sqlSave(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*AlertMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			if err = ac.check(); err != nil {
+				return nil, err
+			}
+			ac.mutation = mutation
+			if node, err = ac.sqlSave(ctx); err != nil {
+				return nil, err
+			}
+			mutation.id = &node.ID
+			mutation.done = true
+			return node, err
+		})
+		for i := len(ac.hooks) - 1; i >= 0; i-- {
+			if ac.hooks[i] == nil {
+				return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = ac.hooks[i](mut)
+		}
+		v, err := mut.Mutate(ctx, ac.mutation)
+		if err != nil {
+			return nil, err
+		}
+		nv, ok := v.(*Alert)
+		if !ok {
+			return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v)
+		}
+		node = nv
+	}
+	return node, err
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (ac *AlertCreate) SaveX(ctx context.Context) *Alert {
+	v, err := ac.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (ac *AlertCreate) Exec(ctx context.Context) error {
+	_, err := ac.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ac *AlertCreate) ExecX(ctx context.Context) {
+	if err := ac.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (ac *AlertCreate) defaults() {
+	if _, ok := ac.mutation.CreatedAt(); !ok {
+		v := alert.DefaultCreatedAt()
+		ac.mutation.SetCreatedAt(v)
+	}
+	if _, ok := ac.mutation.UpdatedAt(); !ok {
+		v := alert.DefaultUpdatedAt()
+		ac.mutation.SetUpdatedAt(v)
+	}
+	if _, ok := ac.mutation.BucketId(); !ok {
+		v := alert.DefaultBucketId
+		ac.mutation.SetBucketId(v)
+	}
+	if _, ok := ac.mutation.Message(); !ok {
+		v := alert.DefaultMessage
+		ac.mutation.SetMessage(v)
+	}
+	if _, ok := ac.mutation.EventsCount(); !ok {
+		v := alert.DefaultEventsCount
+		ac.mutation.SetEventsCount(v)
+	}
+	if _, ok := ac.mutation.StartedAt(); !ok {
+		v := alert.DefaultStartedAt()
+		ac.mutation.SetStartedAt(v)
+	}
+	if _, ok := ac.mutation.StoppedAt(); !ok {
+		v := alert.DefaultStoppedAt()
+		ac.mutation.SetStoppedAt(v)
+	}
+	if _, ok := ac.mutation.Simulated(); !ok {
+		v := alert.DefaultSimulated
+		ac.mutation.SetSimulated(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (ac *AlertCreate) check() error {
+	if _, ok := ac.mutation.Scenario(); !ok {
+		return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Alert.scenario"`)}
+	}
+	if _, ok := ac.mutation.Simulated(); !ok {
+		return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "Alert.simulated"`)}
+	}
+	return nil
+}
+
+func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
+	_node, _spec := ac.createSpec()
+	if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	return _node, nil
+}
+
+func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
+	var (
+		_node = &Alert{config: ac.config}
+		_spec = &sqlgraph.CreateSpec{
+			Table: alert.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: alert.FieldID,
+			},
+		}
+	)
+	if value, ok := ac.mutation.CreatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: alert.FieldCreatedAt,
+		})
+		_node.CreatedAt = &value
+	}
+	if value, ok := ac.mutation.UpdatedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: alert.FieldUpdatedAt,
+		})
+		_node.UpdatedAt = &value
+	}
+	if value, ok := ac.mutation.Scenario(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldScenario,
+		})
+		_node.Scenario = value
+	}
+	if value, ok := ac.mutation.BucketId(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldBucketId,
+		})
+		_node.BucketId = value
+	}
+	if value, ok := ac.mutation.Message(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldMessage,
+		})
+		_node.Message = value
+	}
+	if value, ok := ac.mutation.EventsCount(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeInt32,
+			Value:  value,
+			Column: alert.FieldEventsCount,
+		})
+		_node.EventsCount = value
+	}
+	if value, ok := ac.mutation.StartedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: alert.FieldStartedAt,
+		})
+		_node.StartedAt = value
+	}
+	if value, ok := ac.mutation.StoppedAt(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeTime,
+			Value:  value,
+			Column: alert.FieldStoppedAt,
+		})
+		_node.StoppedAt = value
+	}
+	if value, ok := ac.mutation.SourceIp(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceIp,
+		})
+		_node.SourceIp = value
+	}
+	if value, ok := ac.mutation.SourceRange(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceRange,
+		})
+		_node.SourceRange = value
+	}
+	if value, ok := ac.mutation.SourceAsNumber(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceAsNumber,
+		})
+		_node.SourceAsNumber = value
+	}
+	if value, ok := ac.mutation.SourceAsName(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceAsName,
+		})
+		_node.SourceAsName = value
+	}
+	if value, ok := ac.mutation.SourceCountry(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceCountry,
+		})
+		_node.SourceCountry = value
+	}
+	if value, ok := ac.mutation.SourceLatitude(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeFloat32,
+			Value:  value,
+			Column: alert.FieldSourceLatitude,
+		})
+		_node.SourceLatitude = value
+	}
+	if value, ok := ac.mutation.SourceLongitude(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeFloat32,
+			Value:  value,
+			Column: alert.FieldSourceLongitude,
+		})
+		_node.SourceLongitude = value
+	}
+	if value, ok := ac.mutation.SourceScope(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceScope,
+		})
+		_node.SourceScope = value
+	}
+	if value, ok := ac.mutation.SourceValue(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldSourceValue,
+		})
+		_node.SourceValue = value
+	}
+	if value, ok := ac.mutation.Capacity(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeInt32,
+			Value:  value,
+			Column: alert.FieldCapacity,
+		})
+		_node.Capacity = value
+	}
+	if value, ok := ac.mutation.LeakSpeed(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldLeakSpeed,
+		})
+		_node.LeakSpeed = value
+	}
+	if value, ok := ac.mutation.ScenarioVersion(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldScenarioVersion,
+		})
+		_node.ScenarioVersion = value
+	}
+	if value, ok := ac.mutation.ScenarioHash(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeString,
+			Value:  value,
+			Column: alert.FieldScenarioHash,
+		})
+		_node.ScenarioHash = value
+	}
+	if value, ok := ac.mutation.Simulated(); ok {
+		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+			Type:   field.TypeBool,
+			Value:  value,
+			Column: alert.FieldSimulated,
+		})
+		_node.Simulated = value
+	}
+	if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   alert.OwnerTable,
+			Columns: []string{alert.OwnerColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: &sqlgraph.FieldSpec{
+					Type:   field.TypeInt,
+					Column: machine.FieldID,
+				},
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.machine_alerts = &nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := ac.mutation.DecisionsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   alert.DecisionsTable,
+			Columns: []string{alert.DecisionsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: &sqlgraph.FieldSpec{
+					Type:   field.TypeInt,
+					Column: decision.FieldID,
+				},
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := ac.mutation.EventsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   alert.EventsTable,
+			Columns: []string{alert.EventsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: &sqlgraph.FieldSpec{
+					Type:   field.TypeInt,
+					Column: event.FieldID,
+				},
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := ac.mutation.MetasIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   alert.MetasTable,
+			Columns: []string{alert.MetasColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: &sqlgraph.FieldSpec{
+					Type:   field.TypeInt,
+					Column: meta.FieldID,
+				},
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// AlertCreateBulk is the builder for creating many Alert entities in bulk.
+type AlertCreateBulk struct {
+	config
+	builders []*AlertCreate
+}
+
+// Save creates the Alert entities in the database.
+func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
+	nodes := make([]*Alert, len(acb.builders))
+	mutators := make([]Mutator, len(acb.builders))
+	for i := range acb.builders {
+		func(i int, root context.Context) {
+			builder := acb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*AlertMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				nodes[i], specs[i] = builder.createSpec()
+				var err error
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (acb *AlertCreateBulk) SaveX(ctx context.Context) []*Alert {
+	v, err := acb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (acb *AlertCreateBulk) Exec(ctx context.Context) error {
+	_, err := acb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (acb *AlertCreateBulk) ExecX(ctx context.Context) {
+	if err := acb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/database/ent/alert_delete.go b/pkg/database/ent/alert_delete.go
new file mode 100644
index 0000000..014bcc2
--- /dev/null
+++ b/pkg/database/ent/alert_delete.go
@@ -0,0 +1,115 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// AlertDelete is the builder for deleting an Alert entity.
+type AlertDelete struct {
+	config
+	hooks    []Hook
+	mutation *AlertMutation
+}
+
+// Where appends a list of predicates to the AlertDelete builder.
+func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete {
+	ad.mutation.Where(ps...)
+	return ad
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (ad *AlertDelete) Exec(ctx context.Context) (int, error) {
+	var (
+		err      error
+		affected int
+	)
+	if len(ad.hooks) == 0 {
+		affected, err = ad.sqlExec(ctx)
+	} else {
+		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+			mutation, ok := m.(*AlertMutation)
+			if !ok {
+				return nil, fmt.Errorf("unexpected mutation type %T", m)
+			}
+			ad.mutation = mutation
+			affected, err = ad.sqlExec(ctx)
+			mutation.done = true
+			return affected, err
+		})
+		for i := len(ad.hooks) - 1; i >= 0; i-- {
+			if ad.hooks[i] == nil {
+				return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+			}
+			mut = ad.hooks[i](mut)
+		}
+		if _, err := mut.Mutate(ctx, ad.mutation); err != nil {
+			return 0, err
+		}
+	}
+	return affected, err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ad *AlertDelete) ExecX(ctx context.Context) int {
+	n, err := ad.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := &sqlgraph.DeleteSpec{
+		Node: &sqlgraph.NodeSpec{
+			Table: alert.Table,
+			ID: &sqlgraph.FieldSpec{
+				Type:   field.TypeInt,
+				Column: alert.FieldID,
+			},
+		},
+	}
+	if ps := ad.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	return affected, err
+}
+
+// AlertDeleteOne is the builder for deleting a single Alert entity.
+type AlertDeleteOne struct {
+	ad *AlertDelete
+}
+
+// Exec executes the deletion query.
+func (ado *AlertDeleteOne) Exec(ctx context.Context) error {
+	n, err := ado.ad.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{alert.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ado *AlertDeleteOne) ExecX(ctx context.Context) {
+	ado.ad.ExecX(ctx)
+}
diff --git a/pkg/database/ent/alert_query.go b/pkg/database/ent/alert_query.go
new file mode 100644
index 0000000..a7a00ca
--- /dev/null
+++ b/pkg/database/ent/alert_query.go
@@ -0,0 +1,839 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
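+
+// Illustrative usage sketch for the query builder defined below, combining the
+// generated field predicates with the And/Or helpers from the alert package;
+// "client" and "ctx" are assumed to be an ent *Client and a context.Context
+// (neither is defined in this file):
+//
+//	alerts, err := client.Alert.Query().
+//		Where(alert.And(
+//			alert.SimulatedEQ(false),
+//			alert.Or(alert.SourceCountryEQ("FR"), alert.CapacityGT(5)),
+//		)).
+//		All(ctx)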
+type AlertQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Alert + withOwner *MachineQuery + withDecisions *DecisionQuery + withEvents *EventQuery + withMetas *MetaQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AlertQuery builder. +func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery { + aq.predicates = append(aq.predicates, ps...) + return aq +} + +// Limit adds a limit step to the query. +func (aq *AlertQuery) Limit(limit int) *AlertQuery { + aq.limit = &limit + return aq +} + +// Offset adds an offset step to the query. +func (aq *AlertQuery) Offset(offset int) *AlertQuery { + aq.offset = &offset + return aq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (aq *AlertQuery) Unique(unique bool) *AlertQuery { + aq.unique = &unique + return aq +} + +// Order adds an order step to the query. +func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery { + aq.order = append(aq.order, o...) + return aq +} + +// QueryOwner chains the current query on the "owner" edge. +func (aq *AlertQuery) QueryOwner() *MachineQuery { + query := &MachineQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(machine.Table, machine.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, alert.OwnerTable, alert.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDecisions chains the current query on the "decisions" edge. +func (aq *AlertQuery) QueryDecisions() *DecisionQuery { + query := &DecisionQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(decision.Table, decision.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.DecisionsTable, alert.DecisionsColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryEvents chains the current query on the "events" edge. +func (aq *AlertQuery) QueryEvents() *EventQuery { + query := &EventQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(event.Table, event.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.EventsTable, alert.EventsColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryMetas chains the current query on the "metas" edge. 
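+//
+// For example (a sketch; client, ctx and id are assumed), the metas attached
+// to a single alert can be fetched by traversing the edge:
+//
+//	metas, err := client.Alert.Query().
+//		Where(alert.IDEQ(id)).
+//		QueryMetas().
+//		All(ctx)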
+func (aq *AlertQuery) QueryMetas() *MetaQuery { + query := &MetaQuery{config: aq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, selector), + sqlgraph.To(meta.Table, meta.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.MetasTable, alert.MetasColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Alert entity from the query. +// Returns a *NotFoundError when no Alert was found. +func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) { + nodes, err := aq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{alert.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aq *AlertQuery) FirstX(ctx context.Context) *Alert { + node, err := aq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Alert ID from the query. +// Returns a *NotFoundError when no Alert ID was found. +func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{alert.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (aq *AlertQuery) FirstIDX(ctx context.Context) int { + id, err := aq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Alert entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Alert entity is found. +// Returns a *NotFoundError when no Alert entities are found. +func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) { + nodes, err := aq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{alert.Label} + default: + return nil, &NotSingularError{alert.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert { + node, err := aq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Alert ID in the query. +// Returns a *NotSingularError when more than one Alert ID is found. +// Returns a *NotFoundError when no entities are found. +func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{alert.Label} + default: + err = &NotSingularError{alert.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aq *AlertQuery) OnlyIDX(ctx context.Context) int { + id, err := aq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Alerts. +func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. 
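+//
+// The X variants are convenience wrappers for call sites where an error is
+// treated as fatal, e.g. tests or one-off tooling:
+//
+//	alerts := client.Alert.Query().AllX(ctx)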
+func (aq *AlertQuery) AllX(ctx context.Context) []*Alert { + nodes, err := aq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Alert IDs. +func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aq *AlertQuery) IDsX(ctx context.Context) []int { + ids, err := aq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aq *AlertQuery) Count(ctx context.Context) (int, error) { + if err := aq.prepareQuery(ctx); err != nil { + return 0, err + } + return aq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (aq *AlertQuery) CountX(ctx context.Context) int { + count, err := aq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) { + if err := aq.prepareQuery(ctx); err != nil { + return false, err + } + return aq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (aq *AlertQuery) ExistX(ctx context.Context) bool { + exist, err := aq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AlertQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aq *AlertQuery) Clone() *AlertQuery { + if aq == nil { + return nil + } + return &AlertQuery{ + config: aq.config, + limit: aq.limit, + offset: aq.offset, + order: append([]OrderFunc{}, aq.order...), + predicates: append([]predicate.Alert{}, aq.predicates...), + withOwner: aq.withOwner.Clone(), + withDecisions: aq.withDecisions.Clone(), + withEvents: aq.withEvents.Clone(), + withMetas: aq.withMetas.Clone(), + // clone intermediate query. + sql: aq.sql.Clone(), + path: aq.path, + unique: aq.unique, + } +} + +// WithOwner tells the query-builder to eager-load the nodes that are connected to +// the "owner" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { + query := &MachineQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withOwner = query + return aq +} + +// WithDecisions tells the query-builder to eager-load the nodes that are connected to +// the "decisions" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { + query := &DecisionQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withDecisions = query + return aq +} + +// WithEvents tells the query-builder to eager-load the nodes that are connected to +// the "events" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { + query := &EventQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withEvents = query + return aq +} + +// WithMetas tells the query-builder to eager-load the nodes that are connected to +// the "metas" edge. The optional arguments are used to configure the query builder of the edge. 
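+//
+// A sketch of combined eager loading (client and ctx assumed); the loaded
+// neighbors become available on the Edges field of each returned Alert:
+//
+//	alerts, err := client.Alert.Query().
+//		WithDecisions(func(q *DecisionQuery) { q.Limit(10) }).
+//		WithMetas().
+//		All(ctx)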
+func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { + query := &MetaQuery{config: aq.config} + for _, opt := range opts { + opt(query) + } + aq.withMetas = query + return aq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Alert.Query(). +// GroupBy(alert.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { + grbuild := &AlertGroupBy{config: aq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + return aq.sqlQuery(ctx), nil + } + grbuild.label = alert.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Alert.Query(). +// Select(alert.FieldCreatedAt). +// Scan(ctx, &v) +func (aq *AlertQuery) Select(fields ...string) *AlertSelect { + aq.fields = append(aq.fields, fields...) + selbuild := &AlertSelect{AlertQuery: aq} + selbuild.label = alert.Label + selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan + return selbuild +} + +func (aq *AlertQuery) prepareQuery(ctx context.Context) error { + for _, f := range aq.fields { + if !alert.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if aq.path != nil { + prev, err := aq.path(ctx) + if err != nil { + return err + } + aq.sql = prev + } + return nil +} + +func (aq *AlertQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Alert, error) { + var ( + nodes = []*Alert{} + withFKs = aq.withFKs + _spec = aq.querySpec() + loadedTypes = [4]bool{ + aq.withOwner != nil, + aq.withDecisions != nil, + aq.withEvents != nil, + aq.withMetas != nil, + } + ) + if aq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, alert.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Alert).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Alert{config: aq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := aq.withOwner; query != nil { + if err := aq.loadOwner(ctx, query, nodes, nil, + func(n *Alert, e *Machine) { n.Edges.Owner = e }); err != nil { + return nil, err + } + } + if query := aq.withDecisions; query != nil { + if err := aq.loadDecisions(ctx, query, nodes, + func(n *Alert) { n.Edges.Decisions = []*Decision{} }, + func(n *Alert, e *Decision) { n.Edges.Decisions = append(n.Edges.Decisions, e) }); err != nil { + return nil, err + } + } + if query := aq.withEvents; query != nil { + if err := aq.loadEvents(ctx, query, nodes, + func(n *Alert) { n.Edges.Events = []*Event{} }, + func(n *Alert, e *Event) { n.Edges.Events = append(n.Edges.Events, e) }); err != nil { + return nil, err + } + } + if query := aq.withMetas; query != nil { + if err := aq.loadMetas(ctx, query, nodes, + func(n *Alert) { n.Edges.Metas = []*Meta{} }, + func(n *Alert, e *Meta) { n.Edges.Metas = append(n.Edges.Metas, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (aq *AlertQuery) loadOwner(ctx context.Context, query *MachineQuery, nodes []*Alert, init func(*Alert), assign func(*Alert, *Machine)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Alert) + for i := range nodes { + if nodes[i].machine_alerts == nil { + continue + } + fk := *nodes[i].machine_alerts + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(machine.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, nodes []*Alert, init func(*Alert), assign func(*Alert, *Decision)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Decision(func(s *sql.Selector) { + s.Where(sql.InValues(alert.DecisionsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.alert_decisions + if fk == nil { + return fmt.Errorf(`foreign-key "alert_decisions" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes []*Alert, init func(*Alert), assign func(*Alert, *Event)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + 
} + query.withFKs = true + query.Where(predicate.Event(func(s *sql.Selector) { + s.Where(sql.InValues(alert.EventsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.alert_events + if fk == nil { + return fmt.Errorf(`foreign-key "alert_events" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*Alert, init func(*Alert), assign func(*Alert, *Meta)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Alert) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Meta(func(s *sql.Selector) { + s.Where(sql.InValues(alert.MetasColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.alert_metas + if fk == nil { + return fmt.Errorf(`foreign-key "alert_metas" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + _spec.Node.Columns = aq.fields + if len(aq.fields) > 0 { + _spec.Unique = aq.unique != nil && *aq.unique + } + return sqlgraph.CountNodes(ctx, aq.driver, _spec) +} + +func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + From: aq.sql, + Unique: true, + } + if unique := aq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := aq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID) + for i := range fields { + if fields[i] != alert.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := aq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := aq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := aq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(aq.driver.Dialect()) + t1 := builder.Table(alert.Table) + columns := aq.fields + if len(columns) == 0 { + columns = alert.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if aq.sql != nil { + selector = aq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if aq.unique != nil && *aq.unique { + selector.Distinct() + } + for _, p := range aq.predicates { + p(selector) + } + for _, p := range aq.order { + p(selector) + } + if offset := aq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AlertGroupBy is the group-by builder for Alert entities. +type AlertGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy { + agb.fns = append(agb.fns, fns...) + return agb +} + +// Scan applies the group-by query and scans the result into the given value. +func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error { + query, err := agb.path(ctx) + if err != nil { + return err + } + agb.sql = query + return agb.sqlScan(ctx, v) +} + +func (agb *AlertGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range agb.fields { + if !alert.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := agb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := agb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (agb *AlertGroupBy) sqlQuery() *sql.Selector { + selector := agb.sql.Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(agb.fields)+len(agb.fns)) + for _, f := range agb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(agb.fields...)...) +} + +// AlertSelect is the builder for selecting fields of Alert entities. +type AlertSelect struct { + *AlertQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (as *AlertSelect) Scan(ctx context.Context, v any) error { + if err := as.prepareQuery(ctx); err != nil { + return err + } + as.sql = as.AlertQuery.sqlQuery(ctx) + return as.sqlScan(ctx, v) +} + +func (as *AlertSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := as.sql.Query() + if err := as.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go new file mode 100644 index 0000000..1f9ecce --- /dev/null +++ b/pkg/database/ent/alert_update.go @@ -0,0 +1,2404 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AlertUpdate is the builder for updating Alert entities. +type AlertUpdate struct { + config + hooks []Hook + mutation *AlertMutation +} + +// Where appends a list predicates to the AlertUpdate builder. +func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate { + au.mutation.Where(ps...) + return au +} + +// SetCreatedAt sets the "created_at" field. +func (au *AlertUpdate) SetCreatedAt(t time.Time) *AlertUpdate { + au.mutation.SetCreatedAt(t) + return au +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (au *AlertUpdate) ClearCreatedAt() *AlertUpdate { + au.mutation.ClearCreatedAt() + return au +} + +// SetUpdatedAt sets the "updated_at" field. +func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { + au.mutation.SetUpdatedAt(t) + return au +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (au *AlertUpdate) ClearUpdatedAt() *AlertUpdate { + au.mutation.ClearUpdatedAt() + return au +} + +// SetScenario sets the "scenario" field. +func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { + au.mutation.SetScenario(s) + return au +} + +// SetBucketId sets the "bucketId" field. +func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { + au.mutation.SetBucketId(s) + return au +} + +// SetNillableBucketId sets the "bucketId" field if the given value is not nil. +func (au *AlertUpdate) SetNillableBucketId(s *string) *AlertUpdate { + if s != nil { + au.SetBucketId(*s) + } + return au +} + +// ClearBucketId clears the value of the "bucketId" field. +func (au *AlertUpdate) ClearBucketId() *AlertUpdate { + au.mutation.ClearBucketId() + return au +} + +// SetMessage sets the "message" field. +func (au *AlertUpdate) SetMessage(s string) *AlertUpdate { + au.mutation.SetMessage(s) + return au +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (au *AlertUpdate) SetNillableMessage(s *string) *AlertUpdate { + if s != nil { + au.SetMessage(*s) + } + return au +} + +// ClearMessage clears the value of the "message" field. +func (au *AlertUpdate) ClearMessage() *AlertUpdate { + au.mutation.ClearMessage() + return au +} + +// SetEventsCount sets the "eventsCount" field. +func (au *AlertUpdate) SetEventsCount(i int32) *AlertUpdate { + au.mutation.ResetEventsCount() + au.mutation.SetEventsCount(i) + return au +} + +// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. +func (au *AlertUpdate) SetNillableEventsCount(i *int32) *AlertUpdate { + if i != nil { + au.SetEventsCount(*i) + } + return au +} + +// AddEventsCount adds i to the "eventsCount" field. +func (au *AlertUpdate) AddEventsCount(i int32) *AlertUpdate { + au.mutation.AddEventsCount(i) + return au +} + +// ClearEventsCount clears the value of the "eventsCount" field. +func (au *AlertUpdate) ClearEventsCount() *AlertUpdate { + au.mutation.ClearEventsCount() + return au +} + +// SetStartedAt sets the "startedAt" field. 
+func (au *AlertUpdate) SetStartedAt(t time.Time) *AlertUpdate { + au.mutation.SetStartedAt(t) + return au +} + +// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. +func (au *AlertUpdate) SetNillableStartedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetStartedAt(*t) + } + return au +} + +// ClearStartedAt clears the value of the "startedAt" field. +func (au *AlertUpdate) ClearStartedAt() *AlertUpdate { + au.mutation.ClearStartedAt() + return au +} + +// SetStoppedAt sets the "stoppedAt" field. +func (au *AlertUpdate) SetStoppedAt(t time.Time) *AlertUpdate { + au.mutation.SetStoppedAt(t) + return au +} + +// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. +func (au *AlertUpdate) SetNillableStoppedAt(t *time.Time) *AlertUpdate { + if t != nil { + au.SetStoppedAt(*t) + } + return au +} + +// ClearStoppedAt clears the value of the "stoppedAt" field. +func (au *AlertUpdate) ClearStoppedAt() *AlertUpdate { + au.mutation.ClearStoppedAt() + return au +} + +// SetSourceIp sets the "sourceIp" field. +func (au *AlertUpdate) SetSourceIp(s string) *AlertUpdate { + au.mutation.SetSourceIp(s) + return au +} + +// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceIp(s *string) *AlertUpdate { + if s != nil { + au.SetSourceIp(*s) + } + return au +} + +// ClearSourceIp clears the value of the "sourceIp" field. +func (au *AlertUpdate) ClearSourceIp() *AlertUpdate { + au.mutation.ClearSourceIp() + return au +} + +// SetSourceRange sets the "sourceRange" field. +func (au *AlertUpdate) SetSourceRange(s string) *AlertUpdate { + au.mutation.SetSourceRange(s) + return au +} + +// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceRange(s *string) *AlertUpdate { + if s != nil { + au.SetSourceRange(*s) + } + return au +} + +// ClearSourceRange clears the value of the "sourceRange" field. +func (au *AlertUpdate) ClearSourceRange() *AlertUpdate { + au.mutation.ClearSourceRange() + return au +} + +// SetSourceAsNumber sets the "sourceAsNumber" field. +func (au *AlertUpdate) SetSourceAsNumber(s string) *AlertUpdate { + au.mutation.SetSourceAsNumber(s) + return au +} + +// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceAsNumber(s *string) *AlertUpdate { + if s != nil { + au.SetSourceAsNumber(*s) + } + return au +} + +// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. +func (au *AlertUpdate) ClearSourceAsNumber() *AlertUpdate { + au.mutation.ClearSourceAsNumber() + return au +} + +// SetSourceAsName sets the "sourceAsName" field. +func (au *AlertUpdate) SetSourceAsName(s string) *AlertUpdate { + au.mutation.SetSourceAsName(s) + return au +} + +// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceAsName(s *string) *AlertUpdate { + if s != nil { + au.SetSourceAsName(*s) + } + return au +} + +// ClearSourceAsName clears the value of the "sourceAsName" field. +func (au *AlertUpdate) ClearSourceAsName() *AlertUpdate { + au.mutation.ClearSourceAsName() + return au +} + +// SetSourceCountry sets the "sourceCountry" field. +func (au *AlertUpdate) SetSourceCountry(s string) *AlertUpdate { + au.mutation.SetSourceCountry(s) + return au +} + +// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. 
+func (au *AlertUpdate) SetNillableSourceCountry(s *string) *AlertUpdate { + if s != nil { + au.SetSourceCountry(*s) + } + return au +} + +// ClearSourceCountry clears the value of the "sourceCountry" field. +func (au *AlertUpdate) ClearSourceCountry() *AlertUpdate { + au.mutation.ClearSourceCountry() + return au +} + +// SetSourceLatitude sets the "sourceLatitude" field. +func (au *AlertUpdate) SetSourceLatitude(f float32) *AlertUpdate { + au.mutation.ResetSourceLatitude() + au.mutation.SetSourceLatitude(f) + return au +} + +// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceLatitude(f *float32) *AlertUpdate { + if f != nil { + au.SetSourceLatitude(*f) + } + return au +} + +// AddSourceLatitude adds f to the "sourceLatitude" field. +func (au *AlertUpdate) AddSourceLatitude(f float32) *AlertUpdate { + au.mutation.AddSourceLatitude(f) + return au +} + +// ClearSourceLatitude clears the value of the "sourceLatitude" field. +func (au *AlertUpdate) ClearSourceLatitude() *AlertUpdate { + au.mutation.ClearSourceLatitude() + return au +} + +// SetSourceLongitude sets the "sourceLongitude" field. +func (au *AlertUpdate) SetSourceLongitude(f float32) *AlertUpdate { + au.mutation.ResetSourceLongitude() + au.mutation.SetSourceLongitude(f) + return au +} + +// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceLongitude(f *float32) *AlertUpdate { + if f != nil { + au.SetSourceLongitude(*f) + } + return au +} + +// AddSourceLongitude adds f to the "sourceLongitude" field. +func (au *AlertUpdate) AddSourceLongitude(f float32) *AlertUpdate { + au.mutation.AddSourceLongitude(f) + return au +} + +// ClearSourceLongitude clears the value of the "sourceLongitude" field. +func (au *AlertUpdate) ClearSourceLongitude() *AlertUpdate { + au.mutation.ClearSourceLongitude() + return au +} + +// SetSourceScope sets the "sourceScope" field. +func (au *AlertUpdate) SetSourceScope(s string) *AlertUpdate { + au.mutation.SetSourceScope(s) + return au +} + +// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceScope(s *string) *AlertUpdate { + if s != nil { + au.SetSourceScope(*s) + } + return au +} + +// ClearSourceScope clears the value of the "sourceScope" field. +func (au *AlertUpdate) ClearSourceScope() *AlertUpdate { + au.mutation.ClearSourceScope() + return au +} + +// SetSourceValue sets the "sourceValue" field. +func (au *AlertUpdate) SetSourceValue(s string) *AlertUpdate { + au.mutation.SetSourceValue(s) + return au +} + +// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSourceValue(s *string) *AlertUpdate { + if s != nil { + au.SetSourceValue(*s) + } + return au +} + +// ClearSourceValue clears the value of the "sourceValue" field. +func (au *AlertUpdate) ClearSourceValue() *AlertUpdate { + au.mutation.ClearSourceValue() + return au +} + +// SetCapacity sets the "capacity" field. +func (au *AlertUpdate) SetCapacity(i int32) *AlertUpdate { + au.mutation.ResetCapacity() + au.mutation.SetCapacity(i) + return au +} + +// SetNillableCapacity sets the "capacity" field if the given value is not nil. +func (au *AlertUpdate) SetNillableCapacity(i *int32) *AlertUpdate { + if i != nil { + au.SetCapacity(*i) + } + return au +} + +// AddCapacity adds i to the "capacity" field. 
+func (au *AlertUpdate) AddCapacity(i int32) *AlertUpdate { + au.mutation.AddCapacity(i) + return au +} + +// ClearCapacity clears the value of the "capacity" field. +func (au *AlertUpdate) ClearCapacity() *AlertUpdate { + au.mutation.ClearCapacity() + return au +} + +// SetLeakSpeed sets the "leakSpeed" field. +func (au *AlertUpdate) SetLeakSpeed(s string) *AlertUpdate { + au.mutation.SetLeakSpeed(s) + return au +} + +// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. +func (au *AlertUpdate) SetNillableLeakSpeed(s *string) *AlertUpdate { + if s != nil { + au.SetLeakSpeed(*s) + } + return au +} + +// ClearLeakSpeed clears the value of the "leakSpeed" field. +func (au *AlertUpdate) ClearLeakSpeed() *AlertUpdate { + au.mutation.ClearLeakSpeed() + return au +} + +// SetScenarioVersion sets the "scenarioVersion" field. +func (au *AlertUpdate) SetScenarioVersion(s string) *AlertUpdate { + au.mutation.SetScenarioVersion(s) + return au +} + +// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenarioVersion(s *string) *AlertUpdate { + if s != nil { + au.SetScenarioVersion(*s) + } + return au +} + +// ClearScenarioVersion clears the value of the "scenarioVersion" field. +func (au *AlertUpdate) ClearScenarioVersion() *AlertUpdate { + au.mutation.ClearScenarioVersion() + return au +} + +// SetScenarioHash sets the "scenarioHash" field. +func (au *AlertUpdate) SetScenarioHash(s string) *AlertUpdate { + au.mutation.SetScenarioHash(s) + return au +} + +// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenarioHash(s *string) *AlertUpdate { + if s != nil { + au.SetScenarioHash(*s) + } + return au +} + +// ClearScenarioHash clears the value of the "scenarioHash" field. +func (au *AlertUpdate) ClearScenarioHash() *AlertUpdate { + au.mutation.ClearScenarioHash() + return au +} + +// SetSimulated sets the "simulated" field. +func (au *AlertUpdate) SetSimulated(b bool) *AlertUpdate { + au.mutation.SetSimulated(b) + return au +} + +// SetNillableSimulated sets the "simulated" field if the given value is not nil. +func (au *AlertUpdate) SetNillableSimulated(b *bool) *AlertUpdate { + if b != nil { + au.SetSimulated(*b) + } + return au +} + +// SetOwnerID sets the "owner" edge to the Machine entity by ID. +func (au *AlertUpdate) SetOwnerID(id int) *AlertUpdate { + au.mutation.SetOwnerID(id) + return au +} + +// SetNillableOwnerID sets the "owner" edge to the Machine entity by ID if the given value is not nil. +func (au *AlertUpdate) SetNillableOwnerID(id *int) *AlertUpdate { + if id != nil { + au = au.SetOwnerID(*id) + } + return au +} + +// SetOwner sets the "owner" edge to the Machine entity. +func (au *AlertUpdate) SetOwner(m *Machine) *AlertUpdate { + return au.SetOwnerID(m.ID) +} + +// AddDecisionIDs adds the "decisions" edge to the Decision entity by IDs. +func (au *AlertUpdate) AddDecisionIDs(ids ...int) *AlertUpdate { + au.mutation.AddDecisionIDs(ids...) + return au +} + +// AddDecisions adds the "decisions" edges to the Decision entity. +func (au *AlertUpdate) AddDecisions(d ...*Decision) *AlertUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return au.AddDecisionIDs(ids...) +} + +// AddEventIDs adds the "events" edge to the Event entity by IDs. +func (au *AlertUpdate) AddEventIDs(ids ...int) *AlertUpdate { + au.mutation.AddEventIDs(ids...) 
+ return au +} + +// AddEvents adds the "events" edges to the Event entity. +func (au *AlertUpdate) AddEvents(e ...*Event) *AlertUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return au.AddEventIDs(ids...) +} + +// AddMetaIDs adds the "metas" edge to the Meta entity by IDs. +func (au *AlertUpdate) AddMetaIDs(ids ...int) *AlertUpdate { + au.mutation.AddMetaIDs(ids...) + return au +} + +// AddMetas adds the "metas" edges to the Meta entity. +func (au *AlertUpdate) AddMetas(m ...*Meta) *AlertUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return au.AddMetaIDs(ids...) +} + +// Mutation returns the AlertMutation object of the builder. +func (au *AlertUpdate) Mutation() *AlertMutation { + return au.mutation +} + +// ClearOwner clears the "owner" edge to the Machine entity. +func (au *AlertUpdate) ClearOwner() *AlertUpdate { + au.mutation.ClearOwner() + return au +} + +// ClearDecisions clears all "decisions" edges to the Decision entity. +func (au *AlertUpdate) ClearDecisions() *AlertUpdate { + au.mutation.ClearDecisions() + return au +} + +// RemoveDecisionIDs removes the "decisions" edge to Decision entities by IDs. +func (au *AlertUpdate) RemoveDecisionIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveDecisionIDs(ids...) + return au +} + +// RemoveDecisions removes "decisions" edges to Decision entities. +func (au *AlertUpdate) RemoveDecisions(d ...*Decision) *AlertUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return au.RemoveDecisionIDs(ids...) +} + +// ClearEvents clears all "events" edges to the Event entity. +func (au *AlertUpdate) ClearEvents() *AlertUpdate { + au.mutation.ClearEvents() + return au +} + +// RemoveEventIDs removes the "events" edge to Event entities by IDs. +func (au *AlertUpdate) RemoveEventIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveEventIDs(ids...) + return au +} + +// RemoveEvents removes "events" edges to Event entities. +func (au *AlertUpdate) RemoveEvents(e ...*Event) *AlertUpdate { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return au.RemoveEventIDs(ids...) +} + +// ClearMetas clears all "metas" edges to the Meta entity. +func (au *AlertUpdate) ClearMetas() *AlertUpdate { + au.mutation.ClearMetas() + return au +} + +// RemoveMetaIDs removes the "metas" edge to Meta entities by IDs. +func (au *AlertUpdate) RemoveMetaIDs(ids ...int) *AlertUpdate { + au.mutation.RemoveMetaIDs(ids...) + return au +} + +// RemoveMetas removes "metas" edges to Meta entities. +func (au *AlertUpdate) RemoveMetas(m ...*Meta) *AlertUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return au.RemoveMetaIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
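+//
+// A minimal sketch (client, ctx and id assumed):
+//
+//	n, err := client.Alert.Update().
+//		Where(alert.IDEQ(id)).
+//		SetSimulated(true).
+//		Save(ctx)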
+func (au *AlertUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + au.defaults() + if len(au.hooks) == 0 { + affected, err = au.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + au.mutation = mutation + affected, err = au.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(au.hooks) - 1; i >= 0; i-- { + if au.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = au.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, au.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (au *AlertUpdate) SaveX(ctx context.Context) int { + affected, err := au.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (au *AlertUpdate) Exec(ctx context.Context) error { + _, err := au.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (au *AlertUpdate) ExecX(ctx context.Context) { + if err := au.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (au *AlertUpdate) defaults() { + if _, ok := au.mutation.CreatedAt(); !ok && !au.mutation.CreatedAtCleared() { + v := alert.UpdateDefaultCreatedAt() + au.mutation.SetCreatedAt(v) + } + if _, ok := au.mutation.UpdatedAt(); !ok && !au.mutation.UpdatedAtCleared() { + v := alert.UpdateDefaultUpdatedAt() + au.mutation.SetUpdatedAt(v) + } +} + +func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + if ps := au.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := au.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldCreatedAt, + }) + } + if au.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldCreatedAt, + }) + } + if value, ok := au.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldUpdatedAt, + }) + } + if au.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldUpdatedAt, + }) + } + if value, ok := au.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenario, + }) + } + if value, ok := au.mutation.BucketId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldBucketId, + }) + } + if au.mutation.BucketIdCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldBucketId, + }) + } + if value, ok := au.mutation.Message(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + 
Type: field.TypeString, + Value: value, + Column: alert.FieldMessage, + }) + } + if au.mutation.MessageCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldMessage, + }) + } + if value, ok := au.mutation.EventsCount(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if value, ok := au.mutation.AddedEventsCount(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if au.mutation.EventsCountCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldEventsCount, + }) + } + if value, ok := au.mutation.StartedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStartedAt, + }) + } + if au.mutation.StartedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStartedAt, + }) + } + if value, ok := au.mutation.StoppedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStoppedAt, + }) + } + if au.mutation.StoppedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStoppedAt, + }) + } + if value, ok := au.mutation.SourceIp(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceIp, + }) + } + if au.mutation.SourceIpCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceIp, + }) + } + if value, ok := au.mutation.SourceRange(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceRange, + }) + } + if au.mutation.SourceRangeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceRange, + }) + } + if value, ok := au.mutation.SourceAsNumber(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsNumber, + }) + } + if au.mutation.SourceAsNumberCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsNumber, + }) + } + if value, ok := au.mutation.SourceAsName(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsName, + }) + } + if au.mutation.SourceAsNameCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsName, + }) + } + if value, ok := au.mutation.SourceCountry(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceCountry, + }) + } + if au.mutation.SourceCountryCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceCountry, + }) + } + if value, ok := au.mutation.SourceLatitude(); ok { + 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := au.mutation.AddedSourceLatitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if au.mutation.SourceLatitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := au.mutation.SourceLongitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := au.mutation.AddedSourceLongitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if au.mutation.SourceLongitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := au.mutation.SourceScope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceScope, + }) + } + if au.mutation.SourceScopeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceScope, + }) + } + if value, ok := au.mutation.SourceValue(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceValue, + }) + } + if au.mutation.SourceValueCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceValue, + }) + } + if value, ok := au.mutation.Capacity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if value, ok := au.mutation.AddedCapacity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if au.mutation.CapacityCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldCapacity, + }) + } + if value, ok := au.mutation.LeakSpeed(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldLeakSpeed, + }) + } + if au.mutation.LeakSpeedCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldLeakSpeed, + }) + } + if value, ok := au.mutation.ScenarioVersion(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioVersion, + }) + } + if au.mutation.ScenarioVersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioVersion, + }) + } + if value, ok := au.mutation.ScenarioHash(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioHash, + }) + } + if au.mutation.ScenarioHashCleared() { + _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioHash, + }) + } + if value, ok := au.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: alert.FieldSimulated, + }) + } + if au.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedDecisionsIDs(); len(nodes) > 0 && !au.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.DecisionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedEventsIDs(); len(nodes) > 0 && !au.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: 
false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if au.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.RemovedMetasIDs(); len(nodes) > 0 && !au.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.MetasIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{alert.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// AlertUpdateOne is the builder for updating a single Alert entity. +type AlertUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AlertMutation +} + +// SetCreatedAt sets the "created_at" field. +func (auo *AlertUpdateOne) SetCreatedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetCreatedAt(t) + return auo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (auo *AlertUpdateOne) ClearCreatedAt() *AlertUpdateOne { + auo.mutation.ClearCreatedAt() + return auo +} + +// SetUpdatedAt sets the "updated_at" field. +func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetUpdatedAt(t) + return auo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (auo *AlertUpdateOne) ClearUpdatedAt() *AlertUpdateOne { + auo.mutation.ClearUpdatedAt() + return auo +} + +// SetScenario sets the "scenario" field. +func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { + auo.mutation.SetScenario(s) + return auo +} + +// SetBucketId sets the "bucketId" field. +func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { + auo.mutation.SetBucketId(s) + return auo +} + +// SetNillableBucketId sets the "bucketId" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableBucketId(s *string) *AlertUpdateOne { + if s != nil { + auo.SetBucketId(*s) + } + return auo +} + +// ClearBucketId clears the value of the "bucketId" field. 
+func (auo *AlertUpdateOne) ClearBucketId() *AlertUpdateOne { + auo.mutation.ClearBucketId() + return auo +} + +// SetMessage sets the "message" field. +func (auo *AlertUpdateOne) SetMessage(s string) *AlertUpdateOne { + auo.mutation.SetMessage(s) + return auo +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableMessage(s *string) *AlertUpdateOne { + if s != nil { + auo.SetMessage(*s) + } + return auo +} + +// ClearMessage clears the value of the "message" field. +func (auo *AlertUpdateOne) ClearMessage() *AlertUpdateOne { + auo.mutation.ClearMessage() + return auo +} + +// SetEventsCount sets the "eventsCount" field. +func (auo *AlertUpdateOne) SetEventsCount(i int32) *AlertUpdateOne { + auo.mutation.ResetEventsCount() + auo.mutation.SetEventsCount(i) + return auo +} + +// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableEventsCount(i *int32) *AlertUpdateOne { + if i != nil { + auo.SetEventsCount(*i) + } + return auo +} + +// AddEventsCount adds i to the "eventsCount" field. +func (auo *AlertUpdateOne) AddEventsCount(i int32) *AlertUpdateOne { + auo.mutation.AddEventsCount(i) + return auo +} + +// ClearEventsCount clears the value of the "eventsCount" field. +func (auo *AlertUpdateOne) ClearEventsCount() *AlertUpdateOne { + auo.mutation.ClearEventsCount() + return auo +} + +// SetStartedAt sets the "startedAt" field. +func (auo *AlertUpdateOne) SetStartedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetStartedAt(t) + return auo +} + +// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableStartedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetStartedAt(*t) + } + return auo +} + +// ClearStartedAt clears the value of the "startedAt" field. +func (auo *AlertUpdateOne) ClearStartedAt() *AlertUpdateOne { + auo.mutation.ClearStartedAt() + return auo +} + +// SetStoppedAt sets the "stoppedAt" field. +func (auo *AlertUpdateOne) SetStoppedAt(t time.Time) *AlertUpdateOne { + auo.mutation.SetStoppedAt(t) + return auo +} + +// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableStoppedAt(t *time.Time) *AlertUpdateOne { + if t != nil { + auo.SetStoppedAt(*t) + } + return auo +} + +// ClearStoppedAt clears the value of the "stoppedAt" field. +func (auo *AlertUpdateOne) ClearStoppedAt() *AlertUpdateOne { + auo.mutation.ClearStoppedAt() + return auo +} + +// SetSourceIp sets the "sourceIp" field. +func (auo *AlertUpdateOne) SetSourceIp(s string) *AlertUpdateOne { + auo.mutation.SetSourceIp(s) + return auo +} + +// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceIp(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceIp(*s) + } + return auo +} + +// ClearSourceIp clears the value of the "sourceIp" field. +func (auo *AlertUpdateOne) ClearSourceIp() *AlertUpdateOne { + auo.mutation.ClearSourceIp() + return auo +} + +// SetSourceRange sets the "sourceRange" field. +func (auo *AlertUpdateOne) SetSourceRange(s string) *AlertUpdateOne { + auo.mutation.SetSourceRange(s) + return auo +} + +// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. 
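// Illustrative sketch, not part of the generated file: the SetNillable*
// variants above let optional inputs — typically JSON fields decoded into
// pointers — update a column only when they were actually supplied; a nil
// pointer leaves the stored value untouched. "patchAlert" and its parameters
// are hypothetical names.
func patchAlert(upd *AlertUpdateOne, msg *string, count *int32) *AlertUpdateOne {
	// Each call is a no-op when its argument is nil.
	return upd.SetNillableMessage(msg).SetNillableEventsCount(count)
}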
+func (auo *AlertUpdateOne) SetNillableSourceRange(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceRange(*s) + } + return auo +} + +// ClearSourceRange clears the value of the "sourceRange" field. +func (auo *AlertUpdateOne) ClearSourceRange() *AlertUpdateOne { + auo.mutation.ClearSourceRange() + return auo +} + +// SetSourceAsNumber sets the "sourceAsNumber" field. +func (auo *AlertUpdateOne) SetSourceAsNumber(s string) *AlertUpdateOne { + auo.mutation.SetSourceAsNumber(s) + return auo +} + +// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceAsNumber(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceAsNumber(*s) + } + return auo +} + +// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. +func (auo *AlertUpdateOne) ClearSourceAsNumber() *AlertUpdateOne { + auo.mutation.ClearSourceAsNumber() + return auo +} + +// SetSourceAsName sets the "sourceAsName" field. +func (auo *AlertUpdateOne) SetSourceAsName(s string) *AlertUpdateOne { + auo.mutation.SetSourceAsName(s) + return auo +} + +// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceAsName(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceAsName(*s) + } + return auo +} + +// ClearSourceAsName clears the value of the "sourceAsName" field. +func (auo *AlertUpdateOne) ClearSourceAsName() *AlertUpdateOne { + auo.mutation.ClearSourceAsName() + return auo +} + +// SetSourceCountry sets the "sourceCountry" field. +func (auo *AlertUpdateOne) SetSourceCountry(s string) *AlertUpdateOne { + auo.mutation.SetSourceCountry(s) + return auo +} + +// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceCountry(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceCountry(*s) + } + return auo +} + +// ClearSourceCountry clears the value of the "sourceCountry" field. +func (auo *AlertUpdateOne) ClearSourceCountry() *AlertUpdateOne { + auo.mutation.ClearSourceCountry() + return auo +} + +// SetSourceLatitude sets the "sourceLatitude" field. +func (auo *AlertUpdateOne) SetSourceLatitude(f float32) *AlertUpdateOne { + auo.mutation.ResetSourceLatitude() + auo.mutation.SetSourceLatitude(f) + return auo +} + +// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceLatitude(f *float32) *AlertUpdateOne { + if f != nil { + auo.SetSourceLatitude(*f) + } + return auo +} + +// AddSourceLatitude adds f to the "sourceLatitude" field. +func (auo *AlertUpdateOne) AddSourceLatitude(f float32) *AlertUpdateOne { + auo.mutation.AddSourceLatitude(f) + return auo +} + +// ClearSourceLatitude clears the value of the "sourceLatitude" field. +func (auo *AlertUpdateOne) ClearSourceLatitude() *AlertUpdateOne { + auo.mutation.ClearSourceLatitude() + return auo +} + +// SetSourceLongitude sets the "sourceLongitude" field. +func (auo *AlertUpdateOne) SetSourceLongitude(f float32) *AlertUpdateOne { + auo.mutation.ResetSourceLongitude() + auo.mutation.SetSourceLongitude(f) + return auo +} + +// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceLongitude(f *float32) *AlertUpdateOne { + if f != nil { + auo.SetSourceLongitude(*f) + } + return auo +} + +// AddSourceLongitude adds f to the "sourceLongitude" field. 
+func (auo *AlertUpdateOne) AddSourceLongitude(f float32) *AlertUpdateOne { + auo.mutation.AddSourceLongitude(f) + return auo +} + +// ClearSourceLongitude clears the value of the "sourceLongitude" field. +func (auo *AlertUpdateOne) ClearSourceLongitude() *AlertUpdateOne { + auo.mutation.ClearSourceLongitude() + return auo +} + +// SetSourceScope sets the "sourceScope" field. +func (auo *AlertUpdateOne) SetSourceScope(s string) *AlertUpdateOne { + auo.mutation.SetSourceScope(s) + return auo +} + +// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceScope(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceScope(*s) + } + return auo +} + +// ClearSourceScope clears the value of the "sourceScope" field. +func (auo *AlertUpdateOne) ClearSourceScope() *AlertUpdateOne { + auo.mutation.ClearSourceScope() + return auo +} + +// SetSourceValue sets the "sourceValue" field. +func (auo *AlertUpdateOne) SetSourceValue(s string) *AlertUpdateOne { + auo.mutation.SetSourceValue(s) + return auo +} + +// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSourceValue(s *string) *AlertUpdateOne { + if s != nil { + auo.SetSourceValue(*s) + } + return auo +} + +// ClearSourceValue clears the value of the "sourceValue" field. +func (auo *AlertUpdateOne) ClearSourceValue() *AlertUpdateOne { + auo.mutation.ClearSourceValue() + return auo +} + +// SetCapacity sets the "capacity" field. +func (auo *AlertUpdateOne) SetCapacity(i int32) *AlertUpdateOne { + auo.mutation.ResetCapacity() + auo.mutation.SetCapacity(i) + return auo +} + +// SetNillableCapacity sets the "capacity" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableCapacity(i *int32) *AlertUpdateOne { + if i != nil { + auo.SetCapacity(*i) + } + return auo +} + +// AddCapacity adds i to the "capacity" field. +func (auo *AlertUpdateOne) AddCapacity(i int32) *AlertUpdateOne { + auo.mutation.AddCapacity(i) + return auo +} + +// ClearCapacity clears the value of the "capacity" field. +func (auo *AlertUpdateOne) ClearCapacity() *AlertUpdateOne { + auo.mutation.ClearCapacity() + return auo +} + +// SetLeakSpeed sets the "leakSpeed" field. +func (auo *AlertUpdateOne) SetLeakSpeed(s string) *AlertUpdateOne { + auo.mutation.SetLeakSpeed(s) + return auo +} + +// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableLeakSpeed(s *string) *AlertUpdateOne { + if s != nil { + auo.SetLeakSpeed(*s) + } + return auo +} + +// ClearLeakSpeed clears the value of the "leakSpeed" field. +func (auo *AlertUpdateOne) ClearLeakSpeed() *AlertUpdateOne { + auo.mutation.ClearLeakSpeed() + return auo +} + +// SetScenarioVersion sets the "scenarioVersion" field. +func (auo *AlertUpdateOne) SetScenarioVersion(s string) *AlertUpdateOne { + auo.mutation.SetScenarioVersion(s) + return auo +} + +// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableScenarioVersion(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenarioVersion(*s) + } + return auo +} + +// ClearScenarioVersion clears the value of the "scenarioVersion" field. +func (auo *AlertUpdateOne) ClearScenarioVersion() *AlertUpdateOne { + auo.mutation.ClearScenarioVersion() + return auo +} + +// SetScenarioHash sets the "scenarioHash" field. 
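// Illustrative sketch, not part of the generated file: SetCapacity resets any
// pending delta before assigning an absolute value, while AddCapacity records
// a delta that sqlSave later emits through _spec.Fields.Add, so the increment
// is applied in SQL rather than read-modify-write. "raiseCapacity" is a
// hypothetical name.
func raiseCapacity(upd *AlertUpdateOne) *AlertUpdateOne {
	return upd.AddCapacity(1) // roughly: SET capacity = capacity + 1
}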
+func (auo *AlertUpdateOne) SetScenarioHash(s string) *AlertUpdateOne { + auo.mutation.SetScenarioHash(s) + return auo +} + +// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableScenarioHash(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenarioHash(*s) + } + return auo +} + +// ClearScenarioHash clears the value of the "scenarioHash" field. +func (auo *AlertUpdateOne) ClearScenarioHash() *AlertUpdateOne { + auo.mutation.ClearScenarioHash() + return auo +} + +// SetSimulated sets the "simulated" field. +func (auo *AlertUpdateOne) SetSimulated(b bool) *AlertUpdateOne { + auo.mutation.SetSimulated(b) + return auo +} + +// SetNillableSimulated sets the "simulated" field if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableSimulated(b *bool) *AlertUpdateOne { + if b != nil { + auo.SetSimulated(*b) + } + return auo +} + +// SetOwnerID sets the "owner" edge to the Machine entity by ID. +func (auo *AlertUpdateOne) SetOwnerID(id int) *AlertUpdateOne { + auo.mutation.SetOwnerID(id) + return auo +} + +// SetNillableOwnerID sets the "owner" edge to the Machine entity by ID if the given value is not nil. +func (auo *AlertUpdateOne) SetNillableOwnerID(id *int) *AlertUpdateOne { + if id != nil { + auo = auo.SetOwnerID(*id) + } + return auo +} + +// SetOwner sets the "owner" edge to the Machine entity. +func (auo *AlertUpdateOne) SetOwner(m *Machine) *AlertUpdateOne { + return auo.SetOwnerID(m.ID) +} + +// AddDecisionIDs adds the "decisions" edge to the Decision entity by IDs. +func (auo *AlertUpdateOne) AddDecisionIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddDecisionIDs(ids...) + return auo +} + +// AddDecisions adds the "decisions" edges to the Decision entity. +func (auo *AlertUpdateOne) AddDecisions(d ...*Decision) *AlertUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return auo.AddDecisionIDs(ids...) +} + +// AddEventIDs adds the "events" edge to the Event entity by IDs. +func (auo *AlertUpdateOne) AddEventIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddEventIDs(ids...) + return auo +} + +// AddEvents adds the "events" edges to the Event entity. +func (auo *AlertUpdateOne) AddEvents(e ...*Event) *AlertUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return auo.AddEventIDs(ids...) +} + +// AddMetaIDs adds the "metas" edge to the Meta entity by IDs. +func (auo *AlertUpdateOne) AddMetaIDs(ids ...int) *AlertUpdateOne { + auo.mutation.AddMetaIDs(ids...) + return auo +} + +// AddMetas adds the "metas" edges to the Meta entity. +func (auo *AlertUpdateOne) AddMetas(m ...*Meta) *AlertUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return auo.AddMetaIDs(ids...) +} + +// Mutation returns the AlertMutation object of the builder. +func (auo *AlertUpdateOne) Mutation() *AlertMutation { + return auo.mutation +} + +// ClearOwner clears the "owner" edge to the Machine entity. +func (auo *AlertUpdateOne) ClearOwner() *AlertUpdateOne { + auo.mutation.ClearOwner() + return auo +} + +// ClearDecisions clears all "decisions" edges to the Decision entity. +func (auo *AlertUpdateOne) ClearDecisions() *AlertUpdateOne { + auo.mutation.ClearDecisions() + return auo +} + +// RemoveDecisionIDs removes the "decisions" edge to Decision entities by IDs. +func (auo *AlertUpdateOne) RemoveDecisionIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveDecisionIDs(ids...) 
+ return auo +} + +// RemoveDecisions removes "decisions" edges to Decision entities. +func (auo *AlertUpdateOne) RemoveDecisions(d ...*Decision) *AlertUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return auo.RemoveDecisionIDs(ids...) +} + +// ClearEvents clears all "events" edges to the Event entity. +func (auo *AlertUpdateOne) ClearEvents() *AlertUpdateOne { + auo.mutation.ClearEvents() + return auo +} + +// RemoveEventIDs removes the "events" edge to Event entities by IDs. +func (auo *AlertUpdateOne) RemoveEventIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveEventIDs(ids...) + return auo +} + +// RemoveEvents removes "events" edges to Event entities. +func (auo *AlertUpdateOne) RemoveEvents(e ...*Event) *AlertUpdateOne { + ids := make([]int, len(e)) + for i := range e { + ids[i] = e[i].ID + } + return auo.RemoveEventIDs(ids...) +} + +// ClearMetas clears all "metas" edges to the Meta entity. +func (auo *AlertUpdateOne) ClearMetas() *AlertUpdateOne { + auo.mutation.ClearMetas() + return auo +} + +// RemoveMetaIDs removes the "metas" edge to Meta entities by IDs. +func (auo *AlertUpdateOne) RemoveMetaIDs(ids ...int) *AlertUpdateOne { + auo.mutation.RemoveMetaIDs(ids...) + return auo +} + +// RemoveMetas removes "metas" edges to Meta entities. +func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return auo.RemoveMetaIDs(ids...) +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOne { + auo.fields = append([]string{field}, fields...) + return auo +} + +// Save executes the query and returns the updated Alert entity. +func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) { + var ( + err error + node *Alert + ) + auo.defaults() + if len(auo.hooks) == 0 { + node, err = auo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + auo.mutation = mutation + node, err = auo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(auo.hooks) - 1; i >= 0; i-- { + if auo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = auo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, auo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Alert) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (auo *AlertUpdateOne) SaveX(ctx context.Context) *Alert { + node, err := auo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (auo *AlertUpdateOne) Exec(ctx context.Context) error { + _, err := auo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (auo *AlertUpdateOne) ExecX(ctx context.Context) { + if err := auo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
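// Illustrative sketch, not part of the generated file: Save above wraps
// sqlSave in the registered hooks, applied in reverse order so the first hook
// registered runs outermost. This assumes the standard ent-generated Client
// in this package (client.Alert.UpdateOneID); "client", "ctx" and
// "markSimulated" are hypothetical names.
func markSimulated(ctx context.Context, client *Client, id int) (*Alert, error) {
	return client.Alert.
		UpdateOneID(id).
		SetSimulated(true).
		Save(ctx)
}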
+func (auo *AlertUpdateOne) defaults() { + if _, ok := auo.mutation.CreatedAt(); !ok && !auo.mutation.CreatedAtCleared() { + v := alert.UpdateDefaultCreatedAt() + auo.mutation.SetCreatedAt(v) + } + if _, ok := auo.mutation.UpdatedAt(); !ok && !auo.mutation.UpdatedAtCleared() { + v := alert.UpdateDefaultUpdatedAt() + auo.mutation.SetUpdatedAt(v) + } +} + +func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: alert.Table, + Columns: alert.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + id, ok := auo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Alert.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := auo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID) + for _, f := range fields { + if !alert.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != alert.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := auo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := auo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldCreatedAt, + }) + } + if auo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldCreatedAt, + }) + } + if value, ok := auo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldUpdatedAt, + }) + } + if auo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldUpdatedAt, + }) + } + if value, ok := auo.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenario, + }) + } + if value, ok := auo.mutation.BucketId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldBucketId, + }) + } + if auo.mutation.BucketIdCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldBucketId, + }) + } + if value, ok := auo.mutation.Message(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldMessage, + }) + } + if auo.mutation.MessageCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldMessage, + }) + } + if value, ok := auo.mutation.EventsCount(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if value, ok := auo.mutation.AddedEventsCount(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldEventsCount, + }) + } + if auo.mutation.EventsCountCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, 
&sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldEventsCount, + }) + } + if value, ok := auo.mutation.StartedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStartedAt, + }) + } + if auo.mutation.StartedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStartedAt, + }) + } + if value, ok := auo.mutation.StoppedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: alert.FieldStoppedAt, + }) + } + if auo.mutation.StoppedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: alert.FieldStoppedAt, + }) + } + if value, ok := auo.mutation.SourceIp(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceIp, + }) + } + if auo.mutation.SourceIpCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceIp, + }) + } + if value, ok := auo.mutation.SourceRange(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceRange, + }) + } + if auo.mutation.SourceRangeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceRange, + }) + } + if value, ok := auo.mutation.SourceAsNumber(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsNumber, + }) + } + if auo.mutation.SourceAsNumberCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsNumber, + }) + } + if value, ok := auo.mutation.SourceAsName(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceAsName, + }) + } + if auo.mutation.SourceAsNameCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceAsName, + }) + } + if value, ok := auo.mutation.SourceCountry(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceCountry, + }) + } + if auo.mutation.SourceCountryCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceCountry, + }) + } + if value, ok := auo.mutation.SourceLatitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := auo.mutation.AddedSourceLatitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLatitude, + }) + } + if auo.mutation.SourceLatitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLatitude, + }) + } + if value, ok := auo.mutation.SourceLongitude(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: 
alert.FieldSourceLongitude, + }) + } + if value, ok := auo.mutation.AddedSourceLongitude(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Value: value, + Column: alert.FieldSourceLongitude, + }) + } + if auo.mutation.SourceLongitudeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeFloat32, + Column: alert.FieldSourceLongitude, + }) + } + if value, ok := auo.mutation.SourceScope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceScope, + }) + } + if auo.mutation.SourceScopeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceScope, + }) + } + if value, ok := auo.mutation.SourceValue(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldSourceValue, + }) + } + if auo.mutation.SourceValueCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldSourceValue, + }) + } + if value, ok := auo.mutation.Capacity(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if value, ok := auo.mutation.AddedCapacity(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Value: value, + Column: alert.FieldCapacity, + }) + } + if auo.mutation.CapacityCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt32, + Column: alert.FieldCapacity, + }) + } + if value, ok := auo.mutation.LeakSpeed(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldLeakSpeed, + }) + } + if auo.mutation.LeakSpeedCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldLeakSpeed, + }) + } + if value, ok := auo.mutation.ScenarioVersion(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioVersion, + }) + } + if auo.mutation.ScenarioVersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioVersion, + }) + } + if value, ok := auo.mutation.ScenarioHash(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: alert.FieldScenarioHash, + }) + } + if auo.mutation.ScenarioHashCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: alert.FieldScenarioHash, + }) + } + if value, ok := auo.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: alert.FieldSimulated, + }) + } + if auo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.OwnerIDs(); 
len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: alert.OwnerTable, + Columns: []string{alert.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.RemovedDecisionsIDs(); len(nodes) > 0 && !auo.mutation.DecisionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.DecisionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.DecisionsTable, + Columns: []string{alert.DecisionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.RemovedEventsIDs(); len(nodes) > 0 && !auo.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.EventsTable, + Columns: []string{alert.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if auo.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) 
+ } + if nodes := auo.mutation.RemovedMetasIDs(); len(nodes) > 0 && !auo.mutation.MetasCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.MetasIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: alert.MetasTable, + Columns: []string{alert.MetasColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Alert{config: auo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{alert.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go new file mode 100644 index 0000000..068fc6c --- /dev/null +++ b/pkg/database/ent/bouncer.go @@ -0,0 +1,220 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" +) + +// Bouncer is the model entity for the Bouncer schema. +type Bouncer struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt *time.Time `json:"created_at"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt *time.Time `json:"updated_at"` + // Name holds the value of the "name" field. + Name string `json:"name"` + // APIKey holds the value of the "api_key" field. + APIKey string `json:"api_key"` + // Revoked holds the value of the "revoked" field. + Revoked bool `json:"revoked"` + // IPAddress holds the value of the "ip_address" field. + IPAddress string `json:"ip_address"` + // Type holds the value of the "type" field. + Type string `json:"type"` + // Version holds the value of the "version" field. + Version string `json:"version"` + // Until holds the value of the "until" field. + Until time.Time `json:"until"` + // LastPull holds the value of the "last_pull" field. + LastPull time.Time `json:"last_pull"` + // AuthType holds the value of the "auth_type" field. + AuthType string `json:"auth_type"` +} + +// scanValues returns the types for scanning values from sql.Rows. 
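// Illustrative sketch, not part of the generated file: sqlSave above converts
// driver failures into this package's typed errors, so callers can branch on
// them. IsNotFound and IsConstraintError are the standard ent-generated
// helpers assumed to exist alongside these types; "saveOrExplain" is a
// hypothetical name.
func saveOrExplain(ctx context.Context, upd *AlertUpdateOne) error {
	_, err := upd.Save(ctx)
	switch {
	case err == nil:
		return nil
	case IsNotFound(err):
		return fmt.Errorf("alert vanished before update: %w", err)
	case IsConstraintError(err):
		return fmt.Errorf("update violates a constraint (e.g. a dangling owner edge): %w", err)
	default:
		return err
	}
}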
+func (*Bouncer) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case bouncer.FieldRevoked: + values[i] = new(sql.NullBool) + case bouncer.FieldID: + values[i] = new(sql.NullInt64) + case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: + values[i] = new(sql.NullString) + case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: + values[i] = new(sql.NullTime) + default: + return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Bouncer fields. +func (b *Bouncer) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case bouncer.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + b.ID = int(value.Int64) + case bouncer.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + b.CreatedAt = new(time.Time) + *b.CreatedAt = value.Time + } + case bouncer.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + b.UpdatedAt = new(time.Time) + *b.UpdatedAt = value.Time + } + case bouncer.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + b.Name = value.String + } + case bouncer.FieldAPIKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key", values[i]) + } else if value.Valid { + b.APIKey = value.String + } + case bouncer.FieldRevoked: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field revoked", values[i]) + } else if value.Valid { + b.Revoked = value.Bool + } + case bouncer.FieldIPAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ip_address", values[i]) + } else if value.Valid { + b.IPAddress = value.String + } + case bouncer.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + b.Type = value.String + } + case bouncer.FieldVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field version", values[i]) + } else if value.Valid { + b.Version = value.String + } + case bouncer.FieldUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field until", values[i]) + } else if value.Valid { + b.Until = value.Time + } + case bouncer.FieldLastPull: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_pull", values[i]) + } else if value.Valid { + b.LastPull = value.Time + } + case bouncer.FieldAuthType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field auth_type", values[i]) + } else if value.Valid { + b.AuthType = 
value.String + } + } + } + return nil +} + +// Update returns a builder for updating this Bouncer. +// Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer +// was returned from a transaction, and the transaction was committed or rolled back. +func (b *Bouncer) Update() *BouncerUpdateOne { + return (&BouncerClient{config: b.config}).UpdateOne(b) +} + +// Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (b *Bouncer) Unwrap() *Bouncer { + _tx, ok := b.config.driver.(*txDriver) + if !ok { + panic("ent: Bouncer is not a transactional entity") + } + b.config.driver = _tx.drv + return b +} + +// String implements the fmt.Stringer. +func (b *Bouncer) String() string { + var builder strings.Builder + builder.WriteString("Bouncer(") + builder.WriteString(fmt.Sprintf("id=%v, ", b.ID)) + if v := b.CreatedAt; v != nil { + builder.WriteString("created_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := b.UpdatedAt; v != nil { + builder.WriteString("updated_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(b.Name) + builder.WriteString(", ") + builder.WriteString("api_key=") + builder.WriteString(b.APIKey) + builder.WriteString(", ") + builder.WriteString("revoked=") + builder.WriteString(fmt.Sprintf("%v", b.Revoked)) + builder.WriteString(", ") + builder.WriteString("ip_address=") + builder.WriteString(b.IPAddress) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(b.Type) + builder.WriteString(", ") + builder.WriteString("version=") + builder.WriteString(b.Version) + builder.WriteString(", ") + builder.WriteString("until=") + builder.WriteString(b.Until.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("last_pull=") + builder.WriteString(b.LastPull.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("auth_type=") + builder.WriteString(b.AuthType) + builder.WriteByte(')') + return builder.String() +} + +// Bouncers is a parsable slice of Bouncer. +type Bouncers []*Bouncer + +func (b Bouncers) config(cfg config) { + for _i := range b { + b[_i].config = cfg + } +} diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go new file mode 100644 index 0000000..b688594 --- /dev/null +++ b/pkg/database/ent/bouncer/bouncer.go @@ -0,0 +1,83 @@ +// Code generated by ent, DO NOT EDIT. + +package bouncer + +import ( + "time" +) + +const ( + // Label holds the string label denoting the bouncer type in the database. + Label = "bouncer" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldAPIKey holds the string denoting the api_key field in the database. + FieldAPIKey = "api_key" + // FieldRevoked holds the string denoting the revoked field in the database. + FieldRevoked = "revoked" + // FieldIPAddress holds the string denoting the ip_address field in the database. 
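// Illustrative sketch, not part of the generated file: Unwrap above matters
// when an entity outlives its transaction — after Commit the entity still
// references the tx driver, so later builders must be detached first. Assumes
// the standard ent-generated Tx and Client in the parent package; names and
// field values are hypothetical.
func provisionBouncer(ctx context.Context, client *Client) (*Bouncer, error) {
	tx, err := client.Tx(ctx)
	if err != nil {
		return nil, err
	}
	b, err := tx.Bouncer.Create().
		SetName("fw-bouncer").
		SetAPIKey("s3cr3t").
		SetType("firewall").
		Save(ctx)
	if err != nil {
		_ = tx.Rollback()
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	// Detach from the committed transaction before handing the entity out.
	return b.Unwrap(), nil
}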
+ FieldIPAddress = "ip_address" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldVersion holds the string denoting the version field in the database. + FieldVersion = "version" + // FieldUntil holds the string denoting the until field in the database. + FieldUntil = "until" + // FieldLastPull holds the string denoting the last_pull field in the database. + FieldLastPull = "last_pull" + // FieldAuthType holds the string denoting the auth_type field in the database. + FieldAuthType = "auth_type" + // Table holds the table name of the bouncer in the database. + Table = "bouncers" +) + +// Columns holds all SQL columns for bouncer fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldName, + FieldAPIKey, + FieldRevoked, + FieldIPAddress, + FieldType, + FieldVersion, + FieldUntil, + FieldLastPull, + FieldAuthType, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. + UpdateDefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultIPAddress holds the default value on creation for the "ip_address" field. + DefaultIPAddress string + // DefaultUntil holds the default value on creation for the "until" field. + DefaultUntil func() time.Time + // DefaultLastPull holds the default value on creation for the "last_pull" field. + DefaultLastPull func() time.Time + // DefaultAuthType holds the default value on creation for the "auth_type" field. + DefaultAuthType string +) diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go new file mode 100644 index 0000000..03a543f --- /dev/null +++ b/pkg/database/ent/bouncer/where.go @@ -0,0 +1,1138 @@ +// Code generated by ent, DO NOT EDIT. + +package bouncer + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }) +} + +// APIKey applies equality check predicate on the "api_key" field. It's identical to APIKeyEQ. +func APIKey(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAPIKey), v)) + }) +} + +// Revoked applies equality check predicate on the "revoked" field. It's identical to RevokedEQ. +func Revoked(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldRevoked), v)) + }) +} + +// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. +func IPAddress(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPAddress), v)) + }) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. +func Until(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. +func LastPull(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastPull), v)) + }) +} + +// AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. 
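// Illustrative sketch, not part of the generated file: each helper above
// returns a predicate.Bouncer — a closure over *sql.Selector — so generated
// predicates compose freely in Where clauses, which AND their variadic
// arguments. Written as parent-package code and assuming the generated query
// builder there; "activeBouncers" and the prefix are hypothetical.
func activeBouncers(ctx context.Context, client *Client) ([]*Bouncer, error) {
	return client.Bouncer.Query().
		Where(
			bouncer.Revoked(false),
			bouncer.NameHasPrefix("fw-"),
		).
		All(ctx)
}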
+func AuthType(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAuthType), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCreatedAt))) + }) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCreatedAt))) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
+func UpdatedAtNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUpdatedAt))) + }) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUpdatedAt))) + }) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldName), v)) + }) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldName), v)) + }) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldName), v...)) + }) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldName), v...)) + }) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldName), v)) + }) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldName), v)) + }) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldName), v)) + }) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldName), v)) + }) +} + +// NameContains applies the Contains predicate on the "name" field. 
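// Illustrative sketch, not part of the generated file: created_at and
// updated_at are nullable (*time.Time on the entity), hence the IsNil/NotNil
// predicates alongside the comparisons. The disjunction helper Or is assumed
// to be generated with the rest of this file; "staleBouncers" is a
// hypothetical name, written as parent-package code.
func staleBouncers(ctx context.Context, client *Client, cutoff time.Time) ([]*Bouncer, error) {
	return client.Bouncer.Query().
		Where(bouncer.Or(
			bouncer.UpdatedAtIsNil(),
			bouncer.UpdatedAtLT(cutoff),
		)).
		All(ctx)
}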
+func NameContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldName), v)) + }) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldName), v)) + }) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldName), v)) + }) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldName), v)) + }) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldName), v)) + }) +} + +// APIKeyEQ applies the EQ predicate on the "api_key" field. +func APIKeyEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyNEQ applies the NEQ predicate on the "api_key" field. +func APIKeyNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyIn applies the In predicate on the "api_key" field. +func APIKeyIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldAPIKey), v...)) + }) +} + +// APIKeyNotIn applies the NotIn predicate on the "api_key" field. +func APIKeyNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldAPIKey), v...)) + }) +} + +// APIKeyGT applies the GT predicate on the "api_key" field. +func APIKeyGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyGTE applies the GTE predicate on the "api_key" field. +func APIKeyGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyLT applies the LT predicate on the "api_key" field. +func APIKeyLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyLTE applies the LTE predicate on the "api_key" field. +func APIKeyLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyContains applies the Contains predicate on the "api_key" field. +func APIKeyContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyHasPrefix applies the HasPrefix predicate on the "api_key" field. +func APIKeyHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyHasSuffix applies the HasSuffix predicate on the "api_key" field. 
+func APIKeyHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyEqualFold applies the EqualFold predicate on the "api_key" field. +func APIKeyEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldAPIKey), v)) + }) +} + +// APIKeyContainsFold applies the ContainsFold predicate on the "api_key" field. +func APIKeyContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldAPIKey), v)) + }) +} + +// RevokedEQ applies the EQ predicate on the "revoked" field. +func RevokedEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldRevoked), v)) + }) +} + +// RevokedNEQ applies the NEQ predicate on the "revoked" field. +func RevokedNEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldRevoked), v)) + }) +} + +// IPAddressEQ applies the EQ predicate on the "ip_address" field. +func IPAddressEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressNEQ applies the NEQ predicate on the "ip_address" field. +func IPAddressNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressIn applies the In predicate on the "ip_address" field. +func IPAddressIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldIPAddress), v...)) + }) +} + +// IPAddressNotIn applies the NotIn predicate on the "ip_address" field. +func IPAddressNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldIPAddress), v...)) + }) +} + +// IPAddressGT applies the GT predicate on the "ip_address" field. +func IPAddressGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressGTE applies the GTE predicate on the "ip_address" field. +func IPAddressGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressLT applies the LT predicate on the "ip_address" field. +func IPAddressLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressLTE applies the LTE predicate on the "ip_address" field. +func IPAddressLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressContains applies the Contains predicate on the "ip_address" field. +func IPAddressContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. 
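+//
+// Since ip_address is stored as a string, prefix matching can act as a
+// rough network filter (illustrative; the prefix is an assumed value):
+//
+//	bouncers, err := client.Bouncer.Query().
+//		Where(bouncer.IPAddressHasPrefix("192.168.")).
+//		All(ctx)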
+func IPAddressHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. +func IPAddressHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressIsNil applies the IsNil predicate on the "ip_address" field. +func IPAddressIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldIPAddress))) + }) +} + +// IPAddressNotNil applies the NotNil predicate on the "ip_address" field. +func IPAddressNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldIPAddress))) + }) +} + +// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. +func IPAddressEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldIPAddress), v)) + }) +} + +// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. +func IPAddressContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldIPAddress), v)) + }) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldType), v...)) + }) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldType), v...)) + }) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldType), v)) + }) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldType), v)) + }) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldType), v)) + }) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldType), v)) + }) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldType), v)) + }) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. 
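+//
+// For case-insensitive comparisons on this field, see TypeEqualFold and
+// TypeContainsFold below.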
+func TypeHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldType), v)) + }) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldType), v)) + }) +} + +// TypeIsNil applies the IsNil predicate on the "type" field. +func TypeIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldType))) + }) +} + +// TypeNotNil applies the NotNil predicate on the "type" field. +func TypeNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldType))) + }) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldType), v)) + }) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldType), v)) + }) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldVersion), v)) + }) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldVersion), v...)) + }) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldVersion), v...)) + }) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldVersion), v)) + }) +} + +// VersionGTE applies the GTE predicate on the "version" field. +func VersionGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldVersion), v)) + }) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldVersion), v)) + }) +} + +// VersionLTE applies the LTE predicate on the "version" field. +func VersionLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldVersion), v)) + }) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldVersion), v)) + }) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. 
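+//
+// Illustrative: selecting bouncers whose reported version starts with an
+// assumed major-version prefix:
+//
+//	bouncers, err := client.Bouncer.Query().
+//		Where(bouncer.VersionHasPrefix("v1.")).
+//		All(ctx)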
+func VersionHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldVersion), v)) + }) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. +func VersionHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldVersion), v)) + }) +} + +// VersionIsNil applies the IsNil predicate on the "version" field. +func VersionIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldVersion))) + }) +} + +// VersionNotNil applies the NotNil predicate on the "version" field. +func VersionNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldVersion))) + }) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldVersion), v)) + }) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldVersion), v)) + }) +} + +// UntilEQ applies the EQ predicate on the "until" field. +func UntilEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// UntilNEQ applies the NEQ predicate on the "until" field. +func UntilNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUntil), v)) + }) +} + +// UntilIn applies the In predicate on the "until" field. +func UntilIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUntil), v...)) + }) +} + +// UntilNotIn applies the NotIn predicate on the "until" field. +func UntilNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUntil), v...)) + }) +} + +// UntilGT applies the GT predicate on the "until" field. +func UntilGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUntil), v)) + }) +} + +// UntilGTE applies the GTE predicate on the "until" field. +func UntilGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUntil), v)) + }) +} + +// UntilLT applies the LT predicate on the "until" field. +func UntilLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUntil), v)) + }) +} + +// UntilLTE applies the LTE predicate on the "until" field. +func UntilLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUntil), v)) + }) +} + +// UntilIsNil applies the IsNil predicate on the "until" field. +func UntilIsNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUntil))) + }) +} + +// UntilNotNil applies the NotNil predicate on the "until" field. 
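+//
+// A sketch of combining "until" predicates to select entries whose validity
+// window has passed (And is defined at the end of this package):
+//
+//	expired := bouncer.And(
+//		bouncer.UntilNotNil(),
+//		bouncer.UntilLT(time.Now()),
+//	)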
+func UntilNotNil() predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUntil))) + }) +} + +// LastPullEQ applies the EQ predicate on the "last_pull" field. +func LastPullEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastPull), v)) + }) +} + +// LastPullNEQ applies the NEQ predicate on the "last_pull" field. +func LastPullNEQ(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLastPull), v)) + }) +} + +// LastPullIn applies the In predicate on the "last_pull" field. +func LastPullIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldLastPull), v...)) + }) +} + +// LastPullNotIn applies the NotIn predicate on the "last_pull" field. +func LastPullNotIn(vs ...time.Time) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldLastPull), v...)) + }) +} + +// LastPullGT applies the GT predicate on the "last_pull" field. +func LastPullGT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLastPull), v)) + }) +} + +// LastPullGTE applies the GTE predicate on the "last_pull" field. +func LastPullGTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLastPull), v)) + }) +} + +// LastPullLT applies the LT predicate on the "last_pull" field. +func LastPullLT(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLastPull), v)) + }) +} + +// LastPullLTE applies the LTE predicate on the "last_pull" field. +func LastPullLTE(v time.Time) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLastPull), v)) + }) +} + +// AuthTypeEQ applies the EQ predicate on the "auth_type" field. +func AuthTypeEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. +func AuthTypeNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeIn applies the In predicate on the "auth_type" field. +func AuthTypeIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldAuthType), v...)) + }) +} + +// AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. +func AuthTypeNotIn(vs ...string) predicate.Bouncer { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldAuthType), v...)) + }) +} + +// AuthTypeGT applies the GT predicate on the "auth_type" field. +func AuthTypeGT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeGTE applies the GTE predicate on the "auth_type" field. 
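+//
+// Ordering predicates on string fields follow the database's string
+// collation; for auth_type the equality form is the usual choice (the
+// value shown is an assumed example):
+//
+//	bouncers, err := client.Bouncer.Query().
+//		Where(bouncer.AuthTypeEQ("api-key")).
+//		All(ctx)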
+func AuthTypeGTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeLT applies the LT predicate on the "auth_type" field. +func AuthTypeLT(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeLTE applies the LTE predicate on the "auth_type" field. +func AuthTypeLTE(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeContains applies the Contains predicate on the "auth_type" field. +func AuthTypeContains(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. +func AuthTypeHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. +func AuthTypeHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. +func AuthTypeEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. +func AuthTypeContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Bouncer) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Bouncer) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Bouncer) predicate.Bouncer { + return predicate.Bouncer(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go new file mode 100644 index 0000000..685ce08 --- /dev/null +++ b/pkg/database/ent/bouncer_create.go @@ -0,0 +1,473 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" +) + +// BouncerCreate is the builder for creating a Bouncer entity. +type BouncerCreate struct { + config + mutation *BouncerMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (bc *BouncerCreate) SetCreatedAt(t time.Time) *BouncerCreate { + bc.mutation.SetCreatedAt(t) + return bc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
+func (bc *BouncerCreate) SetNillableCreatedAt(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetCreatedAt(*t) + } + return bc +} + +// SetUpdatedAt sets the "updated_at" field. +func (bc *BouncerCreate) SetUpdatedAt(t time.Time) *BouncerCreate { + bc.mutation.SetUpdatedAt(t) + return bc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableUpdatedAt(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetUpdatedAt(*t) + } + return bc +} + +// SetName sets the "name" field. +func (bc *BouncerCreate) SetName(s string) *BouncerCreate { + bc.mutation.SetName(s) + return bc +} + +// SetAPIKey sets the "api_key" field. +func (bc *BouncerCreate) SetAPIKey(s string) *BouncerCreate { + bc.mutation.SetAPIKey(s) + return bc +} + +// SetRevoked sets the "revoked" field. +func (bc *BouncerCreate) SetRevoked(b bool) *BouncerCreate { + bc.mutation.SetRevoked(b) + return bc +} + +// SetIPAddress sets the "ip_address" field. +func (bc *BouncerCreate) SetIPAddress(s string) *BouncerCreate { + bc.mutation.SetIPAddress(s) + return bc +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableIPAddress(s *string) *BouncerCreate { + if s != nil { + bc.SetIPAddress(*s) + } + return bc +} + +// SetType sets the "type" field. +func (bc *BouncerCreate) SetType(s string) *BouncerCreate { + bc.mutation.SetType(s) + return bc +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableType(s *string) *BouncerCreate { + if s != nil { + bc.SetType(*s) + } + return bc +} + +// SetVersion sets the "version" field. +func (bc *BouncerCreate) SetVersion(s string) *BouncerCreate { + bc.mutation.SetVersion(s) + return bc +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableVersion(s *string) *BouncerCreate { + if s != nil { + bc.SetVersion(*s) + } + return bc +} + +// SetUntil sets the "until" field. +func (bc *BouncerCreate) SetUntil(t time.Time) *BouncerCreate { + bc.mutation.SetUntil(t) + return bc +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableUntil(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetUntil(*t) + } + return bc +} + +// SetLastPull sets the "last_pull" field. +func (bc *BouncerCreate) SetLastPull(t time.Time) *BouncerCreate { + bc.mutation.SetLastPull(t) + return bc +} + +// SetNillableLastPull sets the "last_pull" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableLastPull(t *time.Time) *BouncerCreate { + if t != nil { + bc.SetLastPull(*t) + } + return bc +} + +// SetAuthType sets the "auth_type" field. +func (bc *BouncerCreate) SetAuthType(s string) *BouncerCreate { + bc.mutation.SetAuthType(s) + return bc +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableAuthType(s *string) *BouncerCreate { + if s != nil { + bc.SetAuthType(*s) + } + return bc +} + +// Mutation returns the BouncerMutation object of the builder. +func (bc *BouncerCreate) Mutation() *BouncerMutation { + return bc.mutation +} + +// Save creates the Bouncer in the database. 
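+//
+// A minimal creation sketch; the field values are assumed placeholders.
+// Name, api_key and revoked must be set explicitly (see check), while the
+// remaining required fields are filled by defaults:
+//
+//	b, err := client.Bouncer.Create().
+//		SetName("my-bouncer").
+//		SetAPIKey("s3cr3t").
+//		SetRevoked(false).
+//		Save(ctx)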
+func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) { + var ( + err error + node *Bouncer + ) + bc.defaults() + if len(bc.hooks) == 0 { + if err = bc.check(); err != nil { + return nil, err + } + node, err = bc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = bc.check(); err != nil { + return nil, err + } + bc.mutation = mutation + if node, err = bc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(bc.hooks) - 1; i >= 0; i-- { + if bc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = bc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, bc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Bouncer) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (bc *BouncerCreate) SaveX(ctx context.Context) *Bouncer { + v, err := bc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (bc *BouncerCreate) Exec(ctx context.Context) error { + _, err := bc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bc *BouncerCreate) ExecX(ctx context.Context) { + if err := bc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (bc *BouncerCreate) defaults() { + if _, ok := bc.mutation.CreatedAt(); !ok { + v := bouncer.DefaultCreatedAt() + bc.mutation.SetCreatedAt(v) + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + v := bouncer.DefaultUpdatedAt() + bc.mutation.SetUpdatedAt(v) + } + if _, ok := bc.mutation.IPAddress(); !ok { + v := bouncer.DefaultIPAddress + bc.mutation.SetIPAddress(v) + } + if _, ok := bc.mutation.Until(); !ok { + v := bouncer.DefaultUntil() + bc.mutation.SetUntil(v) + } + if _, ok := bc.mutation.LastPull(); !ok { + v := bouncer.DefaultLastPull() + bc.mutation.SetLastPull(v) + } + if _, ok := bc.mutation.AuthType(); !ok { + v := bouncer.DefaultAuthType + bc.mutation.SetAuthType(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
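+//
+// A missing required field surfaces as a *ValidationError, e.g. omitting
+// SetName fails with: ent: missing required field "Bouncer.name".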
+func (bc *BouncerCreate) check() error { + if _, ok := bc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Bouncer.name"`)} + } + if _, ok := bc.mutation.APIKey(); !ok { + return &ValidationError{Name: "api_key", err: errors.New(`ent: missing required field "Bouncer.api_key"`)} + } + if _, ok := bc.mutation.Revoked(); !ok { + return &ValidationError{Name: "revoked", err: errors.New(`ent: missing required field "Bouncer.revoked"`)} + } + if _, ok := bc.mutation.LastPull(); !ok { + return &ValidationError{Name: "last_pull", err: errors.New(`ent: missing required field "Bouncer.last_pull"`)} + } + if _, ok := bc.mutation.AuthType(); !ok { + return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Bouncer.auth_type"`)} + } + return nil +} + +func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { + _node, _spec := bc.createSpec() + if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { + var ( + _node = &Bouncer{config: bc.config} + _spec = &sqlgraph.CreateSpec{ + Table: bouncer.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + } + ) + if value, ok := bc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + _node.CreatedAt = &value + } + if value, ok := bc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + _node.UpdatedAt = &value + } + if value, ok := bc.mutation.Name(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + _node.Name = value + } + if value, ok := bc.mutation.APIKey(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + _node.APIKey = value + } + if value, ok := bc.mutation.Revoked(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + _node.Revoked = value + } + if value, ok := bc.mutation.IPAddress(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + _node.IPAddress = value + } + if value, ok := bc.mutation.GetType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + _node.Type = value + } + if value, ok := bc.mutation.Version(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + _node.Version = value + } + if value, ok := bc.mutation.Until(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + _node.Until = value + } + if value, ok := bc.mutation.LastPull(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, 
+ }) + _node.LastPull = value + } + if value, ok := bc.mutation.AuthType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAuthType, + }) + _node.AuthType = value + } + return _node, _spec +} + +// BouncerCreateBulk is the builder for creating many Bouncer entities in bulk. +type BouncerCreateBulk struct { + config + builders []*BouncerCreate +} + +// Save creates the Bouncer entities in the database. +func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { + specs := make([]*sqlgraph.CreateSpec, len(bcb.builders)) + nodes := make([]*Bouncer, len(bcb.builders)) + mutators := make([]Mutator, len(bcb.builders)) + for i := range bcb.builders { + func(i int, root context.Context) { + builder := bcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, bcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, bcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (bcb *BouncerCreateBulk) SaveX(ctx context.Context) []*Bouncer { + v, err := bcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (bcb *BouncerCreateBulk) Exec(ctx context.Context) error { + _, err := bcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bcb *BouncerCreateBulk) ExecX(ctx context.Context) { + if err := bcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/bouncer_delete.go b/pkg/database/ent/bouncer_delete.go new file mode 100644 index 0000000..6bfb945 --- /dev/null +++ b/pkg/database/ent/bouncer_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// BouncerDelete is the builder for deleting a Bouncer entity. +type BouncerDelete struct { + config + hooks []Hook + mutation *BouncerMutation +} + +// Where appends a list predicates to the BouncerDelete builder. +func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete { + bd.mutation.Where(ps...) + return bd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
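+//
+// Illustrative bulk delete, assuming an initialized *ent.Client:
+//
+//	n, err := client.Bouncer.Delete().
+//		Where(bouncer.RevokedEQ(true)).
+//		Exec(ctx)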
+func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(bd.hooks) == 0 { + affected, err = bd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + bd.mutation = mutation + affected, err = bd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(bd.hooks) - 1; i >= 0; i-- { + if bd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = bd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, bd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bd *BouncerDelete) ExecX(ctx context.Context) int { + n, err := bd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + if ps := bd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, bd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// BouncerDeleteOne is the builder for deleting a single Bouncer entity. +type BouncerDeleteOne struct { + bd *BouncerDelete +} + +// Exec executes the deletion query. +func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { + n, err := bdo.bd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{bouncer.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (bdo *BouncerDeleteOne) ExecX(ctx context.Context) { + bdo.bd.ExecX(ctx) +} diff --git a/pkg/database/ent/bouncer_query.go b/pkg/database/ent/bouncer_query.go new file mode 100644 index 0000000..2747a3e --- /dev/null +++ b/pkg/database/ent/bouncer_query.go @@ -0,0 +1,529 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// BouncerQuery is the builder for querying Bouncer entities. +type BouncerQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Bouncer + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BouncerQuery builder. +func (bq *BouncerQuery) Where(ps ...predicate.Bouncer) *BouncerQuery { + bq.predicates = append(bq.predicates, ps...) + return bq +} + +// Limit adds a limit step to the query. +func (bq *BouncerQuery) Limit(limit int) *BouncerQuery { + bq.limit = &limit + return bq +} + +// Offset adds an offset step to the query. +func (bq *BouncerQuery) Offset(offset int) *BouncerQuery { + bq.offset = &offset + return bq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery { + bq.unique = &unique + return bq +} + +// Order adds an order step to the query. +func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { + bq.order = append(bq.order, o...) + return bq +} + +// First returns the first Bouncer entity from the query. +// Returns a *NotFoundError when no Bouncer was found. +func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) { + nodes, err := bq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{bouncer.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer { + node, err := bq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Bouncer ID from the query. +// Returns a *NotFoundError when no Bouncer ID was found. +func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = bq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{bouncer.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (bq *BouncerQuery) FirstIDX(ctx context.Context) int { + id, err := bq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Bouncer entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Bouncer entity is found. +// Returns a *NotFoundError when no Bouncer entities are found. +func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) { + nodes, err := bq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{bouncer.Label} + default: + return nil, &NotSingularError{bouncer.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer { + node, err := bq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Bouncer ID in the query. +// Returns a *NotSingularError when more than one Bouncer ID is found. +// Returns a *NotFoundError when no entities are found. +func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = bq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{bouncer.Label} + default: + err = &NotSingularError{bouncer.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int { + id, err := bq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Bouncers. +func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) { + if err := bq.prepareQuery(ctx); err != nil { + return nil, err + } + return bq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (bq *BouncerQuery) AllX(ctx context.Context) []*Bouncer { + nodes, err := bq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Bouncer IDs. 
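+//
+// For example, collecting only the primary keys of the matching rows:
+//
+//	ids, err := client.Bouncer.Query().
+//		Where(bouncer.RevokedEQ(false)).
+//		IDs(ctx)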
+func (bq *BouncerQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (bq *BouncerQuery) IDsX(ctx context.Context) []int { + ids, err := bq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (bq *BouncerQuery) Count(ctx context.Context) (int, error) { + if err := bq.prepareQuery(ctx); err != nil { + return 0, err + } + return bq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (bq *BouncerQuery) CountX(ctx context.Context) int { + count, err := bq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) { + if err := bq.prepareQuery(ctx); err != nil { + return false, err + } + return bq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (bq *BouncerQuery) ExistX(ctx context.Context) bool { + exist, err := bq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BouncerQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (bq *BouncerQuery) Clone() *BouncerQuery { + if bq == nil { + return nil + } + return &BouncerQuery{ + config: bq.config, + limit: bq.limit, + offset: bq.offset, + order: append([]OrderFunc{}, bq.order...), + predicates: append([]predicate.Bouncer{}, bq.predicates...), + // clone intermediate query. + sql: bq.sql.Clone(), + path: bq.path, + unique: bq.unique, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at"` +// Count int `json:"count,omitempty"` +// } +// +// client.Bouncer.Query(). +// GroupBy(bouncer.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy { + grbuild := &BouncerGroupBy{config: bq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := bq.prepareQuery(ctx); err != nil { + return nil, err + } + return bq.sqlQuery(ctx), nil + } + grbuild.label = bouncer.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at"` +// } +// +// client.Bouncer.Query(). +// Select(bouncer.FieldCreatedAt). +// Scan(ctx, &v) +func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect { + bq.fields = append(bq.fields, fields...) 
+ selbuild := &BouncerSelect{BouncerQuery: bq} + selbuild.label = bouncer.Label + selbuild.flds, selbuild.scan = &bq.fields, selbuild.Scan + return selbuild +} + +func (bq *BouncerQuery) prepareQuery(ctx context.Context) error { + for _, f := range bq.fields { + if !bouncer.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if bq.path != nil { + prev, err := bq.path(ctx) + if err != nil { + return err + } + bq.sql = prev + } + return nil +} + +func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Bouncer, error) { + var ( + nodes = []*Bouncer{} + _spec = bq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Bouncer).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Bouncer{config: bq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, bq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) { + _spec := bq.querySpec() + _spec.Node.Columns = bq.fields + if len(bq.fields) > 0 { + _spec.Unique = bq.unique != nil && *bq.unique + } + return sqlgraph.CountNodes(ctx, bq.driver, _spec) +} + +func (bq *BouncerQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := bq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + From: bq.sql, + Unique: true, + } + if unique := bq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := bq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID) + for i := range fields { + if fields[i] != bouncer.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := bq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := bq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := bq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := bq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(bq.driver.Dialect()) + t1 := builder.Table(bouncer.Table) + columns := bq.fields + if len(columns) == 0 { + columns = bouncer.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if bq.sql != nil { + selector = bq.sql + selector.Select(selector.Columns(columns...)...) + } + if bq.unique != nil && *bq.unique { + selector.Distinct() + } + for _, p := range bq.predicates { + p(selector) + } + for _, p := range bq.order { + p(selector) + } + if offset := bq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := bq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BouncerGroupBy is the group-by builder for Bouncer entities. +type BouncerGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy { + bgb.fns = append(bgb.fns, fns...) + return bgb +} + +// Scan applies the group-by query and scans the result into the given value. +func (bgb *BouncerGroupBy) Scan(ctx context.Context, v any) error { + query, err := bgb.path(ctx) + if err != nil { + return err + } + bgb.sql = query + return bgb.sqlScan(ctx, v) +} + +func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range bgb.fields { + if !bouncer.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := bgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector { + selector := bgb.sql.Select() + aggregation := make([]string, 0, len(bgb.fns)) + for _, fn := range bgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) + for _, f := range bgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(bgb.fields...)...) +} + +// BouncerSelect is the builder for selecting fields of Bouncer entities. +type BouncerSelect struct { + *BouncerQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (bs *BouncerSelect) Scan(ctx context.Context, v any) error { + if err := bs.prepareQuery(ctx); err != nil { + return err + } + bs.sql = bs.BouncerQuery.sqlQuery(ctx) + return bs.sqlScan(ctx, v) +} + +func (bs *BouncerSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := bs.sql.Query() + if err := bs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go new file mode 100644 index 0000000..acf48de --- /dev/null +++ b/pkg/database/ent/bouncer_update.go @@ -0,0 +1,798 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// BouncerUpdate is the builder for updating Bouncer entities. 
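+//
+// A sketch of a predicate-scoped bulk update, assuming an initialized
+// *ent.Client; Save reports the number of affected rows:
+//
+//	n, err := client.Bouncer.Update().
+//		Where(bouncer.NameEQ("my-bouncer")).
+//		SetRevoked(true).
+//		Save(ctx)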
+type BouncerUpdate struct { + config + hooks []Hook + mutation *BouncerMutation +} + +// Where appends a list predicates to the BouncerUpdate builder. +func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate { + bu.mutation.Where(ps...) + return bu +} + +// SetCreatedAt sets the "created_at" field. +func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { + bu.mutation.SetCreatedAt(t) + return bu +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (bu *BouncerUpdate) ClearCreatedAt() *BouncerUpdate { + bu.mutation.ClearCreatedAt() + return bu +} + +// SetUpdatedAt sets the "updated_at" field. +func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { + bu.mutation.SetUpdatedAt(t) + return bu +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (bu *BouncerUpdate) ClearUpdatedAt() *BouncerUpdate { + bu.mutation.ClearUpdatedAt() + return bu +} + +// SetName sets the "name" field. +func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { + bu.mutation.SetName(s) + return bu +} + +// SetAPIKey sets the "api_key" field. +func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { + bu.mutation.SetAPIKey(s) + return bu +} + +// SetRevoked sets the "revoked" field. +func (bu *BouncerUpdate) SetRevoked(b bool) *BouncerUpdate { + bu.mutation.SetRevoked(b) + return bu +} + +// SetIPAddress sets the "ip_address" field. +func (bu *BouncerUpdate) SetIPAddress(s string) *BouncerUpdate { + bu.mutation.SetIPAddress(s) + return bu +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableIPAddress(s *string) *BouncerUpdate { + if s != nil { + bu.SetIPAddress(*s) + } + return bu +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (bu *BouncerUpdate) ClearIPAddress() *BouncerUpdate { + bu.mutation.ClearIPAddress() + return bu +} + +// SetType sets the "type" field. +func (bu *BouncerUpdate) SetType(s string) *BouncerUpdate { + bu.mutation.SetType(s) + return bu +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableType(s *string) *BouncerUpdate { + if s != nil { + bu.SetType(*s) + } + return bu +} + +// ClearType clears the value of the "type" field. +func (bu *BouncerUpdate) ClearType() *BouncerUpdate { + bu.mutation.ClearType() + return bu +} + +// SetVersion sets the "version" field. +func (bu *BouncerUpdate) SetVersion(s string) *BouncerUpdate { + bu.mutation.SetVersion(s) + return bu +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableVersion(s *string) *BouncerUpdate { + if s != nil { + bu.SetVersion(*s) + } + return bu +} + +// ClearVersion clears the value of the "version" field. +func (bu *BouncerUpdate) ClearVersion() *BouncerUpdate { + bu.mutation.ClearVersion() + return bu +} + +// SetUntil sets the "until" field. +func (bu *BouncerUpdate) SetUntil(t time.Time) *BouncerUpdate { + bu.mutation.SetUntil(t) + return bu +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableUntil(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetUntil(*t) + } + return bu +} + +// ClearUntil clears the value of the "until" field. +func (bu *BouncerUpdate) ClearUntil() *BouncerUpdate { + bu.mutation.ClearUntil() + return bu +} + +// SetLastPull sets the "last_pull" field. 
+func (bu *BouncerUpdate) SetLastPull(t time.Time) *BouncerUpdate { + bu.mutation.SetLastPull(t) + return bu +} + +// SetNillableLastPull sets the "last_pull" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableLastPull(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetLastPull(*t) + } + return bu +} + +// SetAuthType sets the "auth_type" field. +func (bu *BouncerUpdate) SetAuthType(s string) *BouncerUpdate { + bu.mutation.SetAuthType(s) + return bu +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableAuthType(s *string) *BouncerUpdate { + if s != nil { + bu.SetAuthType(*s) + } + return bu +} + +// Mutation returns the BouncerMutation object of the builder. +func (bu *BouncerUpdate) Mutation() *BouncerMutation { + return bu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + bu.defaults() + if len(bu.hooks) == 0 { + affected, err = bu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + bu.mutation = mutation + affected, err = bu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(bu.hooks) - 1; i >= 0; i-- { + if bu.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = bu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, bu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (bu *BouncerUpdate) SaveX(ctx context.Context) int { + affected, err := bu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (bu *BouncerUpdate) Exec(ctx context.Context) error { + _, err := bu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (bu *BouncerUpdate) ExecX(ctx context.Context) { + if err := bu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
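+//
+// In particular, created_at and updated_at are refreshed through their
+// Update defaults on every save unless they were explicitly set or cleared.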
+func (bu *BouncerUpdate) defaults() { + if _, ok := bu.mutation.CreatedAt(); !ok && !bu.mutation.CreatedAtCleared() { + v := bouncer.UpdateDefaultCreatedAt() + bu.mutation.SetCreatedAt(v) + } + if _, ok := bu.mutation.UpdatedAt(); !ok && !bu.mutation.UpdatedAtCleared() { + v := bouncer.UpdateDefaultUpdatedAt() + bu.mutation.SetUpdatedAt(v) + } +} + +func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + if ps := bu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := bu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + } + if bu.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldCreatedAt, + }) + } + if value, ok := bu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + } + if bu.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldUpdatedAt, + }) + } + if value, ok := bu.mutation.Name(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + } + if value, ok := bu.mutation.APIKey(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + } + if value, ok := bu.mutation.Revoked(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + } + if value, ok := bu.mutation.IPAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + } + if bu.mutation.IPAddressCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldIPAddress, + }) + } + if value, ok := bu.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + } + if bu.mutation.TypeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldType, + }) + } + if value, ok := bu.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + } + if bu.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldVersion, + }) + } + if value, ok := bu.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + } + if bu.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: 
bouncer.FieldUntil, + }) + } + if value, ok := bu.mutation.LastPull(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, + }) + } + if value, ok := bu.mutation.AuthType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAuthType, + }) + } + if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{bouncer.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// BouncerUpdateOne is the builder for updating a single Bouncer entity. +type BouncerUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BouncerMutation +} + +// SetCreatedAt sets the "created_at" field. +func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { + buo.mutation.SetCreatedAt(t) + return buo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (buo *BouncerUpdateOne) ClearCreatedAt() *BouncerUpdateOne { + buo.mutation.ClearCreatedAt() + return buo +} + +// SetUpdatedAt sets the "updated_at" field. +func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { + buo.mutation.SetUpdatedAt(t) + return buo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (buo *BouncerUpdateOne) ClearUpdatedAt() *BouncerUpdateOne { + buo.mutation.ClearUpdatedAt() + return buo +} + +// SetName sets the "name" field. +func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { + buo.mutation.SetName(s) + return buo +} + +// SetAPIKey sets the "api_key" field. +func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { + buo.mutation.SetAPIKey(s) + return buo +} + +// SetRevoked sets the "revoked" field. +func (buo *BouncerUpdateOne) SetRevoked(b bool) *BouncerUpdateOne { + buo.mutation.SetRevoked(b) + return buo +} + +// SetIPAddress sets the "ip_address" field. +func (buo *BouncerUpdateOne) SetIPAddress(s string) *BouncerUpdateOne { + buo.mutation.SetIPAddress(s) + return buo +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableIPAddress(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetIPAddress(*s) + } + return buo +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (buo *BouncerUpdateOne) ClearIPAddress() *BouncerUpdateOne { + buo.mutation.ClearIPAddress() + return buo +} + +// SetType sets the "type" field. +func (buo *BouncerUpdateOne) SetType(s string) *BouncerUpdateOne { + buo.mutation.SetType(s) + return buo +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableType(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetType(*s) + } + return buo +} + +// ClearType clears the value of the "type" field. +func (buo *BouncerUpdateOne) ClearType() *BouncerUpdateOne { + buo.mutation.ClearType() + return buo +} + +// SetVersion sets the "version" field. +func (buo *BouncerUpdateOne) SetVersion(s string) *BouncerUpdateOne { + buo.mutation.SetVersion(s) + return buo +} + +// SetNillableVersion sets the "version" field if the given value is not nil. 
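+// Editorial note: the SetNillable* helpers exist so optional fields from an
+// API payload can be applied without a nil check at every call site; a nil
+// pointer simply leaves the column untouched. A minimal sketch (req and id
+// are hypothetical caller-side names):
+//
+//	client.Bouncer.UpdateOneID(id).
+//		SetNillableVersion(req.Version). // *string; nil means "no change"
+//		SetNillableUntil(req.Until).     // *time.Time, same convention
+//		SaveX(ctx)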
+func (buo *BouncerUpdateOne) SetNillableVersion(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetVersion(*s) + } + return buo +} + +// ClearVersion clears the value of the "version" field. +func (buo *BouncerUpdateOne) ClearVersion() *BouncerUpdateOne { + buo.mutation.ClearVersion() + return buo +} + +// SetUntil sets the "until" field. +func (buo *BouncerUpdateOne) SetUntil(t time.Time) *BouncerUpdateOne { + buo.mutation.SetUntil(t) + return buo +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableUntil(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetUntil(*t) + } + return buo +} + +// ClearUntil clears the value of the "until" field. +func (buo *BouncerUpdateOne) ClearUntil() *BouncerUpdateOne { + buo.mutation.ClearUntil() + return buo +} + +// SetLastPull sets the "last_pull" field. +func (buo *BouncerUpdateOne) SetLastPull(t time.Time) *BouncerUpdateOne { + buo.mutation.SetLastPull(t) + return buo +} + +// SetNillableLastPull sets the "last_pull" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableLastPull(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetLastPull(*t) + } + return buo +} + +// SetAuthType sets the "auth_type" field. +func (buo *BouncerUpdateOne) SetAuthType(s string) *BouncerUpdateOne { + buo.mutation.SetAuthType(s) + return buo +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableAuthType(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetAuthType(*s) + } + return buo +} + +// Mutation returns the BouncerMutation object of the builder. +func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { + return buo.mutation +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne { + buo.fields = append([]string{field}, fields...) + return buo +} + +// Save executes the query and returns the updated Bouncer entity. +func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) { + var ( + err error + node *Bouncer + ) + buo.defaults() + if len(buo.hooks) == 0 { + node, err = buo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + buo.mutation = mutation + node, err = buo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(buo.hooks) - 1; i >= 0; i-- { + if buo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = buo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, buo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Bouncer) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (buo *BouncerUpdateOne) SaveX(ctx context.Context) *Bouncer { + node, err := buo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (buo *BouncerUpdateOne) Exec(ctx context.Context) error { + _, err := buo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
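+// Editorial note: the X-suffixed variants trade an error return for a panic,
+// which only suits one-shot setup paths such as tests or migrations; request
+// handling code should stay with Exec/Save and propagate the error. Sketch:
+//
+//	if err := buo.Exec(ctx); err != nil { // preferred in server paths
+//		return fmt.Errorf("updating bouncer: %w", err)
+//	}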
+func (buo *BouncerUpdateOne) ExecX(ctx context.Context) { + if err := buo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (buo *BouncerUpdateOne) defaults() { + if _, ok := buo.mutation.CreatedAt(); !ok && !buo.mutation.CreatedAtCleared() { + v := bouncer.UpdateDefaultCreatedAt() + buo.mutation.SetCreatedAt(v) + } + if _, ok := buo.mutation.UpdatedAt(); !ok && !buo.mutation.UpdatedAtCleared() { + v := bouncer.UpdateDefaultUpdatedAt() + buo.mutation.SetUpdatedAt(v) + } +} + +func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: bouncer.Table, + Columns: bouncer.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: bouncer.FieldID, + }, + }, + } + id, ok := buo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Bouncer.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := buo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID) + for _, f := range fields { + if !bouncer.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != bouncer.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := buo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := buo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldCreatedAt, + }) + } + if buo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldCreatedAt, + }) + } + if value, ok := buo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUpdatedAt, + }) + } + if buo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldUpdatedAt, + }) + } + if value, ok := buo.mutation.Name(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldName, + }) + } + if value, ok := buo.mutation.APIKey(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAPIKey, + }) + } + if value, ok := buo.mutation.Revoked(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: bouncer.FieldRevoked, + }) + } + if value, ok := buo.mutation.IPAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldIPAddress, + }) + } + if buo.mutation.IPAddressCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldIPAddress, + }) + } + if value, ok := buo.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldType, + }) + } + if buo.mutation.TypeCleared() { + _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldType, + }) + } + if value, ok := buo.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldVersion, + }) + } + if buo.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: bouncer.FieldVersion, + }) + } + if value, ok := buo.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldUntil, + }) + } + if buo.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: bouncer.FieldUntil, + }) + } + if value, ok := buo.mutation.LastPull(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: bouncer.FieldLastPull, + }) + } + if value, ok := buo.mutation.AuthType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: bouncer.FieldAuthType, + }) + } + _node = &Bouncer{config: buo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, buo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{bouncer.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go new file mode 100644 index 0000000..97909ee --- /dev/null +++ b/pkg/database/ent/client.go @@ -0,0 +1,827 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Alert is the client for interacting with the Alert builders. + Alert *AlertClient + // Bouncer is the client for interacting with the Bouncer builders. + Bouncer *BouncerClient + // Decision is the client for interacting with the Decision builders. + Decision *DecisionClient + // Event is the client for interacting with the Event builders. + Event *EventClient + // Machine is the client for interacting with the Machine builders. + Machine *MachineClient + // Meta is the client for interacting with the Meta builders. + Meta *MetaClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + cfg := config{log: log.Println, hooks: &hooks{}} + cfg.options(opts...) 
+ client := &Client{config: cfg} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Alert = NewAlertClient(c.config) + c.Bouncer = NewBouncerClient(c.config) + c.Decision = NewDecisionClient(c.config) + c.Event = NewEventClient(c.config) + c.Machine = NewMachineClient(c.config) + c.Meta = NewMetaClient(c.config) +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + Alert: NewAlertClient(cfg), + Bouncer: NewBouncerClient(cfg), + Decision: NewDecisionClient(cfg), + Event: NewEventClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + Alert: NewAlertClient(cfg), + Bouncer: NewBouncerClient(cfg), + Decision: NewDecisionClient(cfg), + Event: NewEventClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// Alert. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.Alert.Use(hooks...) + c.Bouncer.Use(hooks...) + c.Decision.Use(hooks...) + c.Event.Use(hooks...) + c.Machine.Use(hooks...) + c.Meta.Use(hooks...) +} + +// AlertClient is a client for the Alert schema. +type AlertClient struct { + config +} + +// NewAlertClient returns a client for the Alert from the given config. 
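+// Editorial usage sketch for the Client as a whole; the DSN and the
+// Schema.Create migration call are assumptions based on the standard ent
+// runtime rather than code shown in this patch:
+//
+//	client, err := ent.Open("sqlite3", "file:crowdsec.db?_fk=1")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close()
+//	if err := client.Schema.Create(ctx); err != nil { // assumed migrate API
+//		log.Fatal(err)
+//	}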
+func NewAlertClient(c config) *AlertClient { + return &AlertClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `alert.Hooks(f(g(h())))`. +func (c *AlertClient) Use(hooks ...Hook) { + c.hooks.Alert = append(c.hooks.Alert, hooks...) +} + +// Create returns a builder for creating a Alert entity. +func (c *AlertClient) Create() *AlertCreate { + mutation := newAlertMutation(c.config, OpCreate) + return &AlertCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Alert entities. +func (c *AlertClient) CreateBulk(builders ...*AlertCreate) *AlertCreateBulk { + return &AlertCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Alert. +func (c *AlertClient) Update() *AlertUpdate { + mutation := newAlertMutation(c.config, OpUpdate) + return &AlertUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AlertClient) UpdateOne(a *Alert) *AlertUpdateOne { + mutation := newAlertMutation(c.config, OpUpdateOne, withAlert(a)) + return &AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AlertClient) UpdateOneID(id int) *AlertUpdateOne { + mutation := newAlertMutation(c.config, OpUpdateOne, withAlertID(id)) + return &AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Alert. +func (c *AlertClient) Delete() *AlertDelete { + mutation := newAlertMutation(c.config, OpDelete) + return &AlertDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AlertClient) DeleteOne(a *Alert) *AlertDeleteOne { + return c.DeleteOneID(a.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { + builder := c.Delete().Where(alert.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AlertDeleteOne{builder} +} + +// Query returns a query builder for Alert. +func (c *AlertClient) Query() *AlertQuery { + return &AlertQuery{ + config: c.config, + } +} + +// Get returns a Alert entity by its id. +func (c *AlertClient) Get(ctx context.Context, id int) (*Alert, error) { + return c.Query().Where(alert.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AlertClient) GetX(ctx context.Context, id int) *Alert { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Alert. +func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { + query := &MachineQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(machine.Table, machine.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, alert.OwnerTable, alert.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDecisions queries the decisions edge of a Alert. 
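+// Editorial sketch of edge traversal from an *Alert named a; All and Only are
+// the standard ent terminators, assumed rather than shown in this hunk:
+//
+//	ds, err := client.Alert.QueryDecisions(a).All(ctx)   // decisions attached to a
+//	owner, err := client.Alert.QueryOwner(a).Only(ctx)   // the machine that raised it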
+func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { + query := &DecisionQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(decision.Table, decision.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.DecisionsTable, alert.DecisionsColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryEvents queries the events edge of a Alert. +func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { + query := &EventQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(event.Table, event.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.EventsTable, alert.EventsColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryMetas queries the metas edge of a Alert. +func (c *AlertClient) QueryMetas(a *Alert) *MetaQuery { + query := &MetaQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(alert.Table, alert.FieldID, id), + sqlgraph.To(meta.Table, meta.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, alert.MetasTable, alert.MetasColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AlertClient) Hooks() []Hook { + return c.hooks.Alert +} + +// BouncerClient is a client for the Bouncer schema. +type BouncerClient struct { + config +} + +// NewBouncerClient returns a client for the Bouncer from the given config. +func NewBouncerClient(c config) *BouncerClient { + return &BouncerClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `bouncer.Hooks(f(g(h())))`. +func (c *BouncerClient) Use(hooks ...Hook) { + c.hooks.Bouncer = append(c.hooks.Bouncer, hooks...) +} + +// Create returns a builder for creating a Bouncer entity. +func (c *BouncerClient) Create() *BouncerCreate { + mutation := newBouncerMutation(c.config, OpCreate) + return &BouncerCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Bouncer entities. +func (c *BouncerClient) CreateBulk(builders ...*BouncerCreate) *BouncerCreateBulk { + return &BouncerCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Bouncer. +func (c *BouncerClient) Update() *BouncerUpdate { + mutation := newBouncerMutation(c.config, OpUpdate) + return &BouncerUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *BouncerClient) UpdateOne(b *Bouncer) *BouncerUpdateOne { + mutation := newBouncerMutation(c.config, OpUpdateOne, withBouncer(b)) + return &BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BouncerClient) UpdateOneID(id int) *BouncerUpdateOne { + mutation := newBouncerMutation(c.config, OpUpdateOne, withBouncerID(id)) + return &BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Bouncer. 
+func (c *BouncerClient) Delete() *BouncerDelete { + mutation := newBouncerMutation(c.config, OpDelete) + return &BouncerDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BouncerClient) DeleteOne(b *Bouncer) *BouncerDeleteOne { + return c.DeleteOneID(b.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { + builder := c.Delete().Where(bouncer.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BouncerDeleteOne{builder} +} + +// Query returns a query builder for Bouncer. +func (c *BouncerClient) Query() *BouncerQuery { + return &BouncerQuery{ + config: c.config, + } +} + +// Get returns a Bouncer entity by its id. +func (c *BouncerClient) Get(ctx context.Context, id int) (*Bouncer, error) { + return c.Query().Where(bouncer.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *BouncerClient) GetX(ctx context.Context, id int) *Bouncer { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *BouncerClient) Hooks() []Hook { + return c.hooks.Bouncer +} + +// DecisionClient is a client for the Decision schema. +type DecisionClient struct { + config +} + +// NewDecisionClient returns a client for the Decision from the given config. +func NewDecisionClient(c config) *DecisionClient { + return &DecisionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `decision.Hooks(f(g(h())))`. +func (c *DecisionClient) Use(hooks ...Hook) { + c.hooks.Decision = append(c.hooks.Decision, hooks...) +} + +// Create returns a builder for creating a Decision entity. +func (c *DecisionClient) Create() *DecisionCreate { + mutation := newDecisionMutation(c.config, OpCreate) + return &DecisionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Decision entities. +func (c *DecisionClient) CreateBulk(builders ...*DecisionCreate) *DecisionCreateBulk { + return &DecisionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Decision. +func (c *DecisionClient) Update() *DecisionUpdate { + mutation := newDecisionMutation(c.config, OpUpdate) + return &DecisionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DecisionClient) UpdateOne(d *Decision) *DecisionUpdateOne { + mutation := newDecisionMutation(c.config, OpUpdateOne, withDecision(d)) + return &DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DecisionClient) UpdateOneID(id int) *DecisionUpdateOne { + mutation := newDecisionMutation(c.config, OpUpdateOne, withDecisionID(id)) + return &DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Decision. +func (c *DecisionClient) Delete() *DecisionDelete { + mutation := newDecisionMutation(c.config, OpDelete) + return &DecisionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
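+// Editorial sketch: bulk deletes go through Delete().Where(...) rather than
+// DeleteOne. Where/Exec follow the standard ent delete builder, and UntilLT
+// is assumed by analogy with the comparison predicates in decision/where.go
+// later in this patch:
+//
+//	n, err := client.Decision.Delete().
+//		Where(decision.UntilLT(time.Now())). // assumed predicate
+//		Exec(ctx) // n = number of expired decisions removed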
+func (c *DecisionClient) DeleteOne(d *Decision) *DecisionDeleteOne { + return c.DeleteOneID(d.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { + builder := c.Delete().Where(decision.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DecisionDeleteOne{builder} +} + +// Query returns a query builder for Decision. +func (c *DecisionClient) Query() *DecisionQuery { + return &DecisionQuery{ + config: c.config, + } +} + +// Get returns a Decision entity by its id. +func (c *DecisionClient) Get(ctx context.Context, id int) (*Decision, error) { + return c.Query().Where(decision.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DecisionClient) GetX(ctx context.Context, id int) *Decision { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Decision. +func (c *DecisionClient) QueryOwner(d *Decision) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := d.ID + step := sqlgraph.NewStep( + sqlgraph.From(decision.Table, decision.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, decision.OwnerTable, decision.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(d.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *DecisionClient) Hooks() []Hook { + return c.hooks.Decision +} + +// EventClient is a client for the Event schema. +type EventClient struct { + config +} + +// NewEventClient returns a client for the Event from the given config. +func NewEventClient(c config) *EventClient { + return &EventClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `event.Hooks(f(g(h())))`. +func (c *EventClient) Use(hooks ...Hook) { + c.hooks.Event = append(c.hooks.Event, hooks...) +} + +// Create returns a builder for creating a Event entity. +func (c *EventClient) Create() *EventCreate { + mutation := newEventMutation(c.config, OpCreate) + return &EventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Event entities. +func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk { + return &EventCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Event. +func (c *EventClient) Update() *EventUpdate { + mutation := newEventMutation(c.config, OpUpdate) + return &EventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *EventClient) UpdateOne(e *Event) *EventUpdateOne { + mutation := newEventMutation(c.config, OpUpdateOne, withEvent(e)) + return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *EventClient) UpdateOneID(id int) *EventUpdateOne { + mutation := newEventMutation(c.config, OpUpdateOne, withEventID(id)) + return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Event. 
+func (c *EventClient) Delete() *EventDelete { + mutation := newEventMutation(c.config, OpDelete) + return &EventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne { + return c.DeleteOneID(e.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { + builder := c.Delete().Where(event.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &EventDeleteOne{builder} +} + +// Query returns a query builder for Event. +func (c *EventClient) Query() *EventQuery { + return &EventQuery{ + config: c.config, + } +} + +// Get returns a Event entity by its id. +func (c *EventClient) Get(ctx context.Context, id int) (*Event, error) { + return c.Query().Where(event.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *EventClient) GetX(ctx context.Context, id int) *Event { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Event. +func (c *EventClient) QueryOwner(e *Event) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := e.ID + step := sqlgraph.NewStep( + sqlgraph.From(event.Table, event.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, event.OwnerTable, event.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(e.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *EventClient) Hooks() []Hook { + return c.hooks.Event +} + +// MachineClient is a client for the Machine schema. +type MachineClient struct { + config +} + +// NewMachineClient returns a client for the Machine from the given config. +func NewMachineClient(c config) *MachineClient { + return &MachineClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `machine.Hooks(f(g(h())))`. +func (c *MachineClient) Use(hooks ...Hook) { + c.hooks.Machine = append(c.hooks.Machine, hooks...) +} + +// Create returns a builder for creating a Machine entity. +func (c *MachineClient) Create() *MachineCreate { + mutation := newMachineMutation(c.config, OpCreate) + return &MachineCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Machine entities. +func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBulk { + return &MachineCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Machine. +func (c *MachineClient) Update() *MachineUpdate { + mutation := newMachineMutation(c.config, OpUpdate) + return &MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MachineClient) UpdateOne(m *Machine) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachine(m)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *MachineClient) UpdateOneID(id int) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachineID(id)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Machine. +func (c *MachineClient) Delete() *MachineDelete { + mutation := newMachineMutation(c.config, OpDelete) + return &MachineDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { + builder := c.Delete().Where(machine.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MachineDeleteOne{builder} +} + +// Query returns a query builder for Machine. +func (c *MachineClient) Query() *MachineQuery { + return &MachineQuery{ + config: c.config, + } +} + +// Get returns a Machine entity by its id. +func (c *MachineClient) Get(ctx context.Context, id int) (*Machine, error) { + return c.Query().Where(machine.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MachineClient) GetX(ctx context.Context, id int) *Machine { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAlerts queries the alerts edge of a Machine. +func (c *MachineClient) QueryAlerts(m *Machine) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := m.ID + step := sqlgraph.NewStep( + sqlgraph.From(machine.Table, machine.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, machine.AlertsTable, machine.AlertsColumn), + ) + fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *MachineClient) Hooks() []Hook { + return c.hooks.Machine +} + +// MetaClient is a client for the Meta schema. +type MetaClient struct { + config +} + +// NewMetaClient returns a client for the Meta from the given config. +func NewMetaClient(c config) *MetaClient { + return &MetaClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `meta.Hooks(f(g(h())))`. +func (c *MetaClient) Use(hooks ...Hook) { + c.hooks.Meta = append(c.hooks.Meta, hooks...) +} + +// Create returns a builder for creating a Meta entity. +func (c *MetaClient) Create() *MetaCreate { + mutation := newMetaMutation(c.config, OpCreate) + return &MetaCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Meta entities. +func (c *MetaClient) CreateBulk(builders ...*MetaCreate) *MetaCreateBulk { + return &MetaCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Meta. +func (c *MetaClient) Update() *MetaUpdate { + mutation := newMetaMutation(c.config, OpUpdate) + return &MetaUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *MetaClient) UpdateOne(m *Meta) *MetaUpdateOne { + mutation := newMetaMutation(c.config, OpUpdateOne, withMeta(m)) + return &MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MetaClient) UpdateOneID(id int) *MetaUpdateOne { + mutation := newMetaMutation(c.config, OpUpdateOne, withMetaID(id)) + return &MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Meta. +func (c *MetaClient) Delete() *MetaDelete { + mutation := newMetaMutation(c.config, OpDelete) + return &MetaDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *MetaClient) DeleteOne(m *Meta) *MetaDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOne returns a builder for deleting the given entity by its id. +func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { + builder := c.Delete().Where(meta.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MetaDeleteOne{builder} +} + +// Query returns a query builder for Meta. +func (c *MetaClient) Query() *MetaQuery { + return &MetaQuery{ + config: c.config, + } +} + +// Get returns a Meta entity by its id. +func (c *MetaClient) Get(ctx context.Context, id int) (*Meta, error) { + return c.Query().Where(meta.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MetaClient) GetX(ctx context.Context, id int) *Meta { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryOwner queries the owner edge of a Meta. +func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { + query := &AlertQuery{config: c.config} + query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + id := m.ID + step := sqlgraph.NewStep( + sqlgraph.From(meta.Table, meta.FieldID, id), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, meta.OwnerTable, meta.OwnerColumn), + ) + fromV = sqlgraph.Neighbors(m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *MetaClient) Hooks() []Hook { + return c.hooks.Meta +} diff --git a/pkg/database/ent/config.go b/pkg/database/ent/config.go new file mode 100644 index 0000000..fd97656 --- /dev/null +++ b/pkg/database/ent/config.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" +) + +// Option function to configure the client. +type Option func(*config) + +// Config is the configuration for the client and its builder. +type config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks +} + +// hooks per client, for fast access. +type hooks struct { + Alert []ent.Hook + Bouncer []ent.Hook + Decision []ent.Hook + Event []ent.Hook + Machine []ent.Hook + Meta []ent.Hook +} + +// Options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. 
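+// Editorial sketch of wiring the options together (dsn is a hypothetical
+// placeholder and logger a *log.Logger-style value; its Println matches the
+// func(...any) signature expected by Log):
+//
+//	client, err := ent.Open("postgres", dsn,
+//		ent.Debug(),             // wraps the driver with dialect.Debug
+//		ent.Log(logger.Println), // routes the debug output to a custom logger
+//	)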
+func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} diff --git a/pkg/database/ent/context.go b/pkg/database/ent/context.go new file mode 100644 index 0000000..7811bfa --- /dev/null +++ b/pkg/database/ent/context.go @@ -0,0 +1,33 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go new file mode 100644 index 0000000..608ae47 --- /dev/null +++ b/pkg/database/ent/decision.go @@ -0,0 +1,297 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" +) + +// Decision is the model entity for the Decision schema. +type Decision struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt *time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt *time.Time `json:"updated_at,omitempty"` + // Until holds the value of the "until" field. + Until *time.Time `json:"until,omitempty"` + // Scenario holds the value of the "scenario" field. + Scenario string `json:"scenario,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // StartIP holds the value of the "start_ip" field. + StartIP int64 `json:"start_ip,omitempty"` + // EndIP holds the value of the "end_ip" field. + EndIP int64 `json:"end_ip,omitempty"` + // StartSuffix holds the value of the "start_suffix" field. + StartSuffix int64 `json:"start_suffix,omitempty"` + // EndSuffix holds the value of the "end_suffix" field. + EndSuffix int64 `json:"end_suffix,omitempty"` + // IPSize holds the value of the "ip_size" field. + IPSize int64 `json:"ip_size,omitempty"` + // Scope holds the value of the "scope" field. + Scope string `json:"scope,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Origin holds the value of the "origin" field. + Origin string `json:"origin,omitempty"` + // Simulated holds the value of the "simulated" field. + Simulated bool `json:"simulated,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DecisionQuery when eager-loading is set. 
+ Edges DecisionEdges `json:"edges"` + alert_decisions *int +} + +// DecisionEdges holds the relations/edges for other nodes in the graph. +type DecisionEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert `json:"owner,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DecisionEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Decision) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case decision.FieldSimulated: + values[i] = new(sql.NullBool) + case decision.FieldID, decision.FieldStartIP, decision.FieldEndIP, decision.FieldStartSuffix, decision.FieldEndSuffix, decision.FieldIPSize: + values[i] = new(sql.NullInt64) + case decision.FieldScenario, decision.FieldType, decision.FieldScope, decision.FieldValue, decision.FieldOrigin: + values[i] = new(sql.NullString) + case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil: + values[i] = new(sql.NullTime) + case decision.ForeignKeys[0]: // alert_decisions + values[i] = new(sql.NullInt64) + default: + return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Decision fields. 
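+// Editorial note: nullable columns surface as pointer fields on Decision
+// (CreatedAt, UpdatedAt and Until are *time.Time), so callers must nil-check
+// before dereferencing. A sketch, with Get as shown on DecisionClient:
+//
+//	d, err := client.Decision.Get(ctx, id)
+//	if err == nil && d.Until != nil && d.Until.Before(time.Now()) {
+//		// the decision has already expired
+//	}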
+func (d *Decision) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case decision.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + d.ID = int(value.Int64) + case decision.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + d.CreatedAt = new(time.Time) + *d.CreatedAt = value.Time + } + case decision.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + d.UpdatedAt = new(time.Time) + *d.UpdatedAt = value.Time + } + case decision.FieldUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field until", values[i]) + } else if value.Valid { + d.Until = new(time.Time) + *d.Until = value.Time + } + case decision.FieldScenario: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scenario", values[i]) + } else if value.Valid { + d.Scenario = value.String + } + case decision.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + d.Type = value.String + } + case decision.FieldStartIP: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_ip", values[i]) + } else if value.Valid { + d.StartIP = value.Int64 + } + case decision.FieldEndIP: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_ip", values[i]) + } else if value.Valid { + d.EndIP = value.Int64 + } + case decision.FieldStartSuffix: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_suffix", values[i]) + } else if value.Valid { + d.StartSuffix = value.Int64 + } + case decision.FieldEndSuffix: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_suffix", values[i]) + } else if value.Valid { + d.EndSuffix = value.Int64 + } + case decision.FieldIPSize: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field ip_size", values[i]) + } else if value.Valid { + d.IPSize = value.Int64 + } + case decision.FieldScope: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scope", values[i]) + } else if value.Valid { + d.Scope = value.String + } + case decision.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + d.Value = value.String + } + case decision.FieldOrigin: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field origin", values[i]) + } else if value.Valid { + d.Origin = value.String + } + case decision.FieldSimulated: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field simulated", values[i]) + } else if value.Valid { + d.Simulated = value.Bool + } + case decision.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return 
fmt.Errorf("unexpected type %T for edge-field alert_decisions", value) + } else if value.Valid { + d.alert_decisions = new(int) + *d.alert_decisions = int(value.Int64) + } + } + } + return nil +} + +// QueryOwner queries the "owner" edge of the Decision entity. +func (d *Decision) QueryOwner() *AlertQuery { + return (&DecisionClient{config: d.config}).QueryOwner(d) +} + +// Update returns a builder for updating this Decision. +// Note that you need to call Decision.Unwrap() before calling this method if this Decision +// was returned from a transaction, and the transaction was committed or rolled back. +func (d *Decision) Update() *DecisionUpdateOne { + return (&DecisionClient{config: d.config}).UpdateOne(d) +} + +// Unwrap unwraps the Decision entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (d *Decision) Unwrap() *Decision { + _tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("ent: Decision is not a transactional entity") + } + d.config.driver = _tx.drv + return d +} + +// String implements the fmt.Stringer. +func (d *Decision) String() string { + var builder strings.Builder + builder.WriteString("Decision(") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + if v := d.CreatedAt; v != nil { + builder.WriteString("created_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := d.UpdatedAt; v != nil { + builder.WriteString("updated_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := d.Until; v != nil { + builder.WriteString("until=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("scenario=") + builder.WriteString(d.Scenario) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(d.Type) + builder.WriteString(", ") + builder.WriteString("start_ip=") + builder.WriteString(fmt.Sprintf("%v", d.StartIP)) + builder.WriteString(", ") + builder.WriteString("end_ip=") + builder.WriteString(fmt.Sprintf("%v", d.EndIP)) + builder.WriteString(", ") + builder.WriteString("start_suffix=") + builder.WriteString(fmt.Sprintf("%v", d.StartSuffix)) + builder.WriteString(", ") + builder.WriteString("end_suffix=") + builder.WriteString(fmt.Sprintf("%v", d.EndSuffix)) + builder.WriteString(", ") + builder.WriteString("ip_size=") + builder.WriteString(fmt.Sprintf("%v", d.IPSize)) + builder.WriteString(", ") + builder.WriteString("scope=") + builder.WriteString(d.Scope) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(d.Value) + builder.WriteString(", ") + builder.WriteString("origin=") + builder.WriteString(d.Origin) + builder.WriteString(", ") + builder.WriteString("simulated=") + builder.WriteString(fmt.Sprintf("%v", d.Simulated)) + builder.WriteByte(')') + return builder.String() +} + +// Decisions is a parsable slice of Decision. +type Decisions []*Decision + +func (d Decisions) config(cfg config) { + for _i := range d { + d[_i].config = cfg + } +} diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go new file mode 100644 index 0000000..f3a724f --- /dev/null +++ b/pkg/database/ent/decision/decision.go @@ -0,0 +1,106 @@ +// Code generated by ent, DO NOT EDIT. + +package decision + +import ( + "time" +) + +const ( + // Label holds the string label denoting the decision type in the database. 
+ Label = "decision" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUntil holds the string denoting the until field in the database. + FieldUntil = "until" + // FieldScenario holds the string denoting the scenario field in the database. + FieldScenario = "scenario" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldStartIP holds the string denoting the start_ip field in the database. + FieldStartIP = "start_ip" + // FieldEndIP holds the string denoting the end_ip field in the database. + FieldEndIP = "end_ip" + // FieldStartSuffix holds the string denoting the start_suffix field in the database. + FieldStartSuffix = "start_suffix" + // FieldEndSuffix holds the string denoting the end_suffix field in the database. + FieldEndSuffix = "end_suffix" + // FieldIPSize holds the string denoting the ip_size field in the database. + FieldIPSize = "ip_size" + // FieldScope holds the string denoting the scope field in the database. + FieldScope = "scope" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldOrigin holds the string denoting the origin field in the database. + FieldOrigin = "origin" + // FieldSimulated holds the string denoting the simulated field in the database. + FieldSimulated = "simulated" + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + // Table holds the table name of the decision in the database. + Table = "decisions" + // OwnerTable is the table that holds the owner relation/edge. + OwnerTable = "decisions" + // OwnerInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + OwnerInverseTable = "alerts" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "alert_decisions" +) + +// Columns holds all SQL columns for decision fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldUntil, + FieldScenario, + FieldType, + FieldStartIP, + FieldEndIP, + FieldStartSuffix, + FieldEndSuffix, + FieldIPSize, + FieldScope, + FieldValue, + FieldOrigin, + FieldSimulated, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "decisions" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "alert_decisions", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. + UpdateDefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
+ UpdateDefaultUpdatedAt func() time.Time + // DefaultSimulated holds the default value on creation for the "simulated" field. + DefaultSimulated bool +) diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go new file mode 100644 index 0000000..c936685 --- /dev/null +++ b/pkg/database/ent/decision/where.go @@ -0,0 +1,1373 @@ +// Code generated by ent, DO NOT EDIT. + +package decision + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. +func Until(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. +func Scenario(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. 
+func Type(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// StartIP applies equality check predicate on the "start_ip" field. It's identical to StartIPEQ. +func StartIP(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartIP), v)) + }) +} + +// EndIP applies equality check predicate on the "end_ip" field. It's identical to EndIPEQ. +func EndIP(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndIP), v)) + }) +} + +// StartSuffix applies equality check predicate on the "start_suffix" field. It's identical to StartSuffixEQ. +func StartSuffix(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartSuffix), v)) + }) +} + +// EndSuffix applies equality check predicate on the "end_suffix" field. It's identical to EndSuffixEQ. +func EndSuffix(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndSuffix), v)) + }) +} + +// IPSize applies equality check predicate on the "ip_size" field. It's identical to IPSizeEQ. +func IPSize(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPSize), v)) + }) +} + +// Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. +func Scope(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScope), v)) + }) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// Origin applies equality check predicate on the "origin" field. It's identical to OriginEQ. +func Origin(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldOrigin), v)) + }) +} + +// Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. +func Simulated(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCreatedAt))) + }) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCreatedAt))) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUpdatedAt))) + }) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. 
+func UpdatedAtNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUpdatedAt))) + }) +} + +// UntilEQ applies the EQ predicate on the "until" field. +func UntilEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUntil), v)) + }) +} + +// UntilNEQ applies the NEQ predicate on the "until" field. +func UntilNEQ(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUntil), v)) + }) +} + +// UntilIn applies the In predicate on the "until" field. +func UntilIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUntil), v...)) + }) +} + +// UntilNotIn applies the NotIn predicate on the "until" field. +func UntilNotIn(vs ...time.Time) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUntil), v...)) + }) +} + +// UntilGT applies the GT predicate on the "until" field. +func UntilGT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUntil), v)) + }) +} + +// UntilGTE applies the GTE predicate on the "until" field. +func UntilGTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUntil), v)) + }) +} + +// UntilLT applies the LT predicate on the "until" field. +func UntilLT(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUntil), v)) + }) +} + +// UntilLTE applies the LTE predicate on the "until" field. +func UntilLTE(v time.Time) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUntil), v)) + }) +} + +// UntilIsNil applies the IsNil predicate on the "until" field. +func UntilIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUntil))) + }) +} + +// UntilNotNil applies the NotNil predicate on the "until" field. +func UntilNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUntil))) + }) +} + +// ScenarioEQ applies the EQ predicate on the "scenario" field. +func ScenarioEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioNEQ applies the NEQ predicate on the "scenario" field. +func ScenarioNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenario), v)) + }) +} + +// ScenarioIn applies the In predicate on the "scenario" field. +func ScenarioIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldScenario), v...)) + }) +} + +// ScenarioNotIn applies the NotIn predicate on the "scenario" field. +func ScenarioNotIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldScenario), v...)) + }) +} + +// ScenarioGT applies the GT predicate on the "scenario" field. 
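Editor's aside: the `until` time predicates above are the building blocks of "active decision" queries in practice. A sketch, assuming a `client` value and the standard generated `Query`/`All` surface (not shown in this hunk), using only predicates defined in this file:

```go
package example

import (
	"context"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
)

// activeDecisions (hypothetical) selects decisions whose expiry is in the
// future or unset: UntilGT covers timed decisions, UntilIsNil permanent ones.
func activeDecisions(ctx context.Context, client *ent.Client) ([]*ent.Decision, error) {
	return client.Decision.Query().
		Where(decision.Or(
			decision.UntilGT(time.Now()),
			decision.UntilIsNil(),
		)).
		All(ctx)
}
```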
+func ScenarioGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenario), v)) + }) +} + +// ScenarioGTE applies the GTE predicate on the "scenario" field. +func ScenarioGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioLT applies the LT predicate on the "scenario" field. +func ScenarioLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenario), v)) + }) +} + +// ScenarioLTE applies the LTE predicate on the "scenario" field. +func ScenarioLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenario), v)) + }) +} + +// ScenarioContains applies the Contains predicate on the "scenario" field. +func ScenarioContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. +func ScenarioHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenario), v)) + }) +} + +// ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. +func ScenarioHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenario), v)) + }) +} + +// ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. +func ScenarioEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenario), v)) + }) +} + +// ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. +func ScenarioContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenario), v)) + }) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldType), v)) + }) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldType), v)) + }) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldType), v...)) + }) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldType), v...)) + }) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldType), v)) + }) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldType), v)) + }) +} + +// TypeLT applies the LT predicate on the "type" field. 
+func TypeLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldType), v)) + }) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldType), v)) + }) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldType), v)) + }) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldType), v)) + }) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldType), v)) + }) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldType), v)) + }) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldType), v)) + }) +} + +// StartIPEQ applies the EQ predicate on the "start_ip" field. +func StartIPEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartIP), v)) + }) +} + +// StartIPNEQ applies the NEQ predicate on the "start_ip" field. +func StartIPNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStartIP), v)) + }) +} + +// StartIPIn applies the In predicate on the "start_ip" field. +func StartIPIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldStartIP), v...)) + }) +} + +// StartIPNotIn applies the NotIn predicate on the "start_ip" field. +func StartIPNotIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldStartIP), v...)) + }) +} + +// StartIPGT applies the GT predicate on the "start_ip" field. +func StartIPGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStartIP), v)) + }) +} + +// StartIPGTE applies the GTE predicate on the "start_ip" field. +func StartIPGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStartIP), v)) + }) +} + +// StartIPLT applies the LT predicate on the "start_ip" field. +func StartIPLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStartIP), v)) + }) +} + +// StartIPLTE applies the LTE predicate on the "start_ip" field. +func StartIPLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStartIP), v)) + }) +} + +// StartIPIsNil applies the IsNil predicate on the "start_ip" field. 
+func StartIPIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStartIP))) + }) +} + +// StartIPNotNil applies the NotNil predicate on the "start_ip" field. +func StartIPNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStartIP))) + }) +} + +// EndIPEQ applies the EQ predicate on the "end_ip" field. +func EndIPEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndIP), v)) + }) +} + +// EndIPNEQ applies the NEQ predicate on the "end_ip" field. +func EndIPNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldEndIP), v)) + }) +} + +// EndIPIn applies the In predicate on the "end_ip" field. +func EndIPIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldEndIP), v...)) + }) +} + +// EndIPNotIn applies the NotIn predicate on the "end_ip" field. +func EndIPNotIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldEndIP), v...)) + }) +} + +// EndIPGT applies the GT predicate on the "end_ip" field. +func EndIPGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldEndIP), v)) + }) +} + +// EndIPGTE applies the GTE predicate on the "end_ip" field. +func EndIPGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldEndIP), v)) + }) +} + +// EndIPLT applies the LT predicate on the "end_ip" field. +func EndIPLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldEndIP), v)) + }) +} + +// EndIPLTE applies the LTE predicate on the "end_ip" field. +func EndIPLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldEndIP), v)) + }) +} + +// EndIPIsNil applies the IsNil predicate on the "end_ip" field. +func EndIPIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldEndIP))) + }) +} + +// EndIPNotNil applies the NotNil predicate on the "end_ip" field. +func EndIPNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldEndIP))) + }) +} + +// StartSuffixEQ applies the EQ predicate on the "start_suffix" field. +func StartSuffixEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixNEQ applies the NEQ predicate on the "start_suffix" field. +func StartSuffixNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixIn applies the In predicate on the "start_suffix" field. +func StartSuffixIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldStartSuffix), v...)) + }) +} + +// StartSuffixNotIn applies the NotIn predicate on the "start_suffix" field. 
+func StartSuffixNotIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldStartSuffix), v...)) + }) +} + +// StartSuffixGT applies the GT predicate on the "start_suffix" field. +func StartSuffixGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixGTE applies the GTE predicate on the "start_suffix" field. +func StartSuffixGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixLT applies the LT predicate on the "start_suffix" field. +func StartSuffixLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixLTE applies the LTE predicate on the "start_suffix" field. +func StartSuffixLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStartSuffix), v)) + }) +} + +// StartSuffixIsNil applies the IsNil predicate on the "start_suffix" field. +func StartSuffixIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStartSuffix))) + }) +} + +// StartSuffixNotNil applies the NotNil predicate on the "start_suffix" field. +func StartSuffixNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStartSuffix))) + }) +} + +// EndSuffixEQ applies the EQ predicate on the "end_suffix" field. +func EndSuffixEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixNEQ applies the NEQ predicate on the "end_suffix" field. +func EndSuffixNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixIn applies the In predicate on the "end_suffix" field. +func EndSuffixIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldEndSuffix), v...)) + }) +} + +// EndSuffixNotIn applies the NotIn predicate on the "end_suffix" field. +func EndSuffixNotIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldEndSuffix), v...)) + }) +} + +// EndSuffixGT applies the GT predicate on the "end_suffix" field. +func EndSuffixGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixGTE applies the GTE predicate on the "end_suffix" field. +func EndSuffixGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixLT applies the LT predicate on the "end_suffix" field. +func EndSuffixLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixLTE applies the LTE predicate on the "end_suffix" field. 
+func EndSuffixLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldEndSuffix), v)) + }) +} + +// EndSuffixIsNil applies the IsNil predicate on the "end_suffix" field. +func EndSuffixIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldEndSuffix))) + }) +} + +// EndSuffixNotNil applies the NotNil predicate on the "end_suffix" field. +func EndSuffixNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldEndSuffix))) + }) +} + +// IPSizeEQ applies the EQ predicate on the "ip_size" field. +func IPSizeEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIPSize), v)) + }) +} + +// IPSizeNEQ applies the NEQ predicate on the "ip_size" field. +func IPSizeNEQ(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIPSize), v)) + }) +} + +// IPSizeIn applies the In predicate on the "ip_size" field. +func IPSizeIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldIPSize), v...)) + }) +} + +// IPSizeNotIn applies the NotIn predicate on the "ip_size" field. +func IPSizeNotIn(vs ...int64) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldIPSize), v...)) + }) +} + +// IPSizeGT applies the GT predicate on the "ip_size" field. +func IPSizeGT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldIPSize), v)) + }) +} + +// IPSizeGTE applies the GTE predicate on the "ip_size" field. +func IPSizeGTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldIPSize), v)) + }) +} + +// IPSizeLT applies the LT predicate on the "ip_size" field. +func IPSizeLT(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldIPSize), v)) + }) +} + +// IPSizeLTE applies the LTE predicate on the "ip_size" field. +func IPSizeLTE(v int64) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldIPSize), v)) + }) +} + +// IPSizeIsNil applies the IsNil predicate on the "ip_size" field. +func IPSizeIsNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldIPSize))) + }) +} + +// IPSizeNotNil applies the NotNil predicate on the "ip_size" field. +func IPSizeNotNil() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldIPSize))) + }) +} + +// ScopeEQ applies the EQ predicate on the "scope" field. +func ScopeEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScope), v)) + }) +} + +// ScopeNEQ applies the NEQ predicate on the "scope" field. +func ScopeNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScope), v)) + }) +} + +// ScopeIn applies the In predicate on the "scope" field. 
+func ScopeIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldScope), v...)) + }) +} + +// ScopeNotIn applies the NotIn predicate on the "scope" field. +func ScopeNotIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldScope), v...)) + }) +} + +// ScopeGT applies the GT predicate on the "scope" field. +func ScopeGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScope), v)) + }) +} + +// ScopeGTE applies the GTE predicate on the "scope" field. +func ScopeGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScope), v)) + }) +} + +// ScopeLT applies the LT predicate on the "scope" field. +func ScopeLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScope), v)) + }) +} + +// ScopeLTE applies the LTE predicate on the "scope" field. +func ScopeLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScope), v)) + }) +} + +// ScopeContains applies the Contains predicate on the "scope" field. +func ScopeContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScope), v)) + }) +} + +// ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. +func ScopeHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScope), v)) + }) +} + +// ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. +func ScopeHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScope), v)) + }) +} + +// ScopeEqualFold applies the EqualFold predicate on the "scope" field. +func ScopeEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScope), v)) + }) +} + +// ScopeContainsFold applies the ContainsFold predicate on the "scope" field. +func ScopeContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScope), v)) + }) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldValue), v)) + }) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldValue), v...)) + }) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldValue), v...)) + }) +} + +// ValueGT applies the GT predicate on the "value" field. 
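Editor's aside: the `EqualFold`/`ContainsFold` variants above do case-insensitive matching at the SQL layer, which matters because scope casing ("ip", "Ip", "IP") varies across producers. A small sketch combining them with `ValueIn`; `countIPDecisions` is an illustrative name:

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
)

// countIPDecisions (hypothetical) matches scope "Ip" case-insensitively,
// so "ip", "IP" and "Ip" rows are all counted.
func countIPDecisions(ctx context.Context, client *ent.Client, values ...string) (int, error) {
	return client.Decision.Query().
		Where(
			decision.ScopeEqualFold("ip"),
			decision.ValueIn(values...),
		).
		Count(ctx)
}
```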
+func ValueGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldValue), v)) + }) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldValue), v)) + }) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldValue), v)) + }) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldValue), v)) + }) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldValue), v)) + }) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldValue), v)) + }) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldValue), v)) + }) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldValue), v)) + }) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldValue), v)) + }) +} + +// OriginEQ applies the EQ predicate on the "origin" field. +func OriginEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldOrigin), v)) + }) +} + +// OriginNEQ applies the NEQ predicate on the "origin" field. +func OriginNEQ(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldOrigin), v)) + }) +} + +// OriginIn applies the In predicate on the "origin" field. +func OriginIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldOrigin), v...)) + }) +} + +// OriginNotIn applies the NotIn predicate on the "origin" field. +func OriginNotIn(vs ...string) predicate.Decision { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldOrigin), v...)) + }) +} + +// OriginGT applies the GT predicate on the "origin" field. +func OriginGT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldOrigin), v)) + }) +} + +// OriginGTE applies the GTE predicate on the "origin" field. +func OriginGTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldOrigin), v)) + }) +} + +// OriginLT applies the LT predicate on the "origin" field. 
+func OriginLT(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldOrigin), v)) + }) +} + +// OriginLTE applies the LTE predicate on the "origin" field. +func OriginLTE(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldOrigin), v)) + }) +} + +// OriginContains applies the Contains predicate on the "origin" field. +func OriginContains(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldOrigin), v)) + }) +} + +// OriginHasPrefix applies the HasPrefix predicate on the "origin" field. +func OriginHasPrefix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldOrigin), v)) + }) +} + +// OriginHasSuffix applies the HasSuffix predicate on the "origin" field. +func OriginHasSuffix(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldOrigin), v)) + }) +} + +// OriginEqualFold applies the EqualFold predicate on the "origin" field. +func OriginEqualFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldOrigin), v)) + }) +} + +// OriginContainsFold applies the ContainsFold predicate on the "origin" field. +func OriginContainsFold(v string) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldOrigin), v)) + }) +} + +// SimulatedEQ applies the EQ predicate on the "simulated" field. +func SimulatedEQ(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSimulated), v)) + }) +} + +// SimulatedNEQ applies the NEQ predicate on the "simulated" field. +func SimulatedNEQ(v bool) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSimulated), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. 
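Editor's aside: `And`, `Or` and `Not` (defined just below) nest arbitrarily, and `HasOwnerWith` traverses the M2O edge to the owning alert. A sketch joining an edge predicate with a negation; `alert.ScenarioEQ` is assumed to exist in the sibling generated `alert` package, since Alert has a scenario field:

```go
package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
)

// realBansFromScenario (hypothetical) finds non-simulated bans whose owning
// alert was raised by the given scenario.
func realBansFromScenario(ctx context.Context, client *ent.Client, scenario string) ([]*ent.Decision, error) {
	return client.Decision.Query().
		Where(decision.And(
			decision.TypeEQ("ban"),
			decision.Not(decision.SimulatedEQ(true)),
			decision.HasOwnerWith(alert.ScenarioEQ(scenario)),
		)).
		All(ctx)
}
```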
+func Not(p predicate.Decision) predicate.Decision { + return predicate.Decision(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go new file mode 100644 index 0000000..aa1ec07 --- /dev/null +++ b/pkg/database/ent/decision_create.go @@ -0,0 +1,554 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" +) + +// DecisionCreate is the builder for creating a Decision entity. +type DecisionCreate struct { + config + mutation *DecisionMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (dc *DecisionCreate) SetCreatedAt(t time.Time) *DecisionCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableCreatedAt(t *time.Time) *DecisionCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the "updated_at" field. +func (dc *DecisionCreate) SetUpdatedAt(t time.Time) *DecisionCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableUpdatedAt(t *time.Time) *DecisionCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetUntil sets the "until" field. +func (dc *DecisionCreate) SetUntil(t time.Time) *DecisionCreate { + dc.mutation.SetUntil(t) + return dc +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableUntil(t *time.Time) *DecisionCreate { + if t != nil { + dc.SetUntil(*t) + } + return dc +} + +// SetScenario sets the "scenario" field. +func (dc *DecisionCreate) SetScenario(s string) *DecisionCreate { + dc.mutation.SetScenario(s) + return dc +} + +// SetType sets the "type" field. +func (dc *DecisionCreate) SetType(s string) *DecisionCreate { + dc.mutation.SetType(s) + return dc +} + +// SetStartIP sets the "start_ip" field. +func (dc *DecisionCreate) SetStartIP(i int64) *DecisionCreate { + dc.mutation.SetStartIP(i) + return dc +} + +// SetNillableStartIP sets the "start_ip" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableStartIP(i *int64) *DecisionCreate { + if i != nil { + dc.SetStartIP(*i) + } + return dc +} + +// SetEndIP sets the "end_ip" field. +func (dc *DecisionCreate) SetEndIP(i int64) *DecisionCreate { + dc.mutation.SetEndIP(i) + return dc +} + +// SetNillableEndIP sets the "end_ip" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableEndIP(i *int64) *DecisionCreate { + if i != nil { + dc.SetEndIP(*i) + } + return dc +} + +// SetStartSuffix sets the "start_suffix" field. +func (dc *DecisionCreate) SetStartSuffix(i int64) *DecisionCreate { + dc.mutation.SetStartSuffix(i) + return dc +} + +// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableStartSuffix(i *int64) *DecisionCreate { + if i != nil { + dc.SetStartSuffix(*i) + } + return dc +} + +// SetEndSuffix sets the "end_suffix" field. 
+func (dc *DecisionCreate) SetEndSuffix(i int64) *DecisionCreate { + dc.mutation.SetEndSuffix(i) + return dc +} + +// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableEndSuffix(i *int64) *DecisionCreate { + if i != nil { + dc.SetEndSuffix(*i) + } + return dc +} + +// SetIPSize sets the "ip_size" field. +func (dc *DecisionCreate) SetIPSize(i int64) *DecisionCreate { + dc.mutation.SetIPSize(i) + return dc +} + +// SetNillableIPSize sets the "ip_size" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableIPSize(i *int64) *DecisionCreate { + if i != nil { + dc.SetIPSize(*i) + } + return dc +} + +// SetScope sets the "scope" field. +func (dc *DecisionCreate) SetScope(s string) *DecisionCreate { + dc.mutation.SetScope(s) + return dc +} + +// SetValue sets the "value" field. +func (dc *DecisionCreate) SetValue(s string) *DecisionCreate { + dc.mutation.SetValue(s) + return dc +} + +// SetOrigin sets the "origin" field. +func (dc *DecisionCreate) SetOrigin(s string) *DecisionCreate { + dc.mutation.SetOrigin(s) + return dc +} + +// SetSimulated sets the "simulated" field. +func (dc *DecisionCreate) SetSimulated(b bool) *DecisionCreate { + dc.mutation.SetSimulated(b) + return dc +} + +// SetNillableSimulated sets the "simulated" field if the given value is not nil. +func (dc *DecisionCreate) SetNillableSimulated(b *bool) *DecisionCreate { + if b != nil { + dc.SetSimulated(*b) + } + return dc +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (dc *DecisionCreate) SetOwnerID(id int) *DecisionCreate { + dc.mutation.SetOwnerID(id) + return dc +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (dc *DecisionCreate) SetNillableOwnerID(id *int) *DecisionCreate { + if id != nil { + dc = dc.SetOwnerID(*id) + } + return dc +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (dc *DecisionCreate) SetOwner(a *Alert) *DecisionCreate { + return dc.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. +func (dc *DecisionCreate) Mutation() *DecisionMutation { + return dc.mutation +} + +// Save creates the Decision in the database. +func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) { + var ( + err error + node *Decision + ) + dc.defaults() + if len(dc.hooks) == 0 { + if err = dc.check(); err != nil { + return nil, err + } + node, err = dc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = dc.check(); err != nil { + return nil, err + } + dc.mutation = mutation + if node, err = dc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(dc.hooks) - 1; i >= 0; i-- { + if dc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, dc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Decision) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. 
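Editor's aside: the create builder above reads naturally when chained. A sketch creating a timed ban attached to an existing alert; `newBan` and the scenario string are illustrative, and `defaults()`/`check()` (shown just below) fill `created_at`/`updated_at` and enforce the required scenario/type/scope/value/origin fields.

```go
package example

import (
	"context"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// newBan (hypothetical) persists a four-hour ban owned by alert a.
func newBan(ctx context.Context, client *ent.Client, a *ent.Alert, ip string) (*ent.Decision, error) {
	until := time.Now().Add(4 * time.Hour)
	return client.Decision.Create().
		SetScenario("crowdsecurity/ssh-bf").
		SetType("ban").
		SetScope("Ip").
		SetValue(ip).
		SetOrigin("crowdsec").
		SetUntil(until).
		SetOwner(a).
		Save(ctx)
}
```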
+func (dc *DecisionCreate) SaveX(ctx context.Context) *Decision { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dc *DecisionCreate) Exec(ctx context.Context) error { + _, err := dc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dc *DecisionCreate) ExecX(ctx context.Context) { + if err := dc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dc *DecisionCreate) defaults() { + if _, ok := dc.mutation.CreatedAt(); !ok { + v := decision.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + v := decision.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } + if _, ok := dc.mutation.Simulated(); !ok { + v := decision.DefaultSimulated + dc.mutation.SetSimulated(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dc *DecisionCreate) check() error { + if _, ok := dc.mutation.Scenario(); !ok { + return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Decision.scenario"`)} + } + if _, ok := dc.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "Decision.type"`)} + } + if _, ok := dc.mutation.Scope(); !ok { + return &ValidationError{Name: "scope", err: errors.New(`ent: missing required field "Decision.scope"`)} + } + if _, ok := dc.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "Decision.value"`)} + } + if _, ok := dc.mutation.Origin(); !ok { + return &ValidationError{Name: "origin", err: errors.New(`ent: missing required field "Decision.origin"`)} + } + if _, ok := dc.mutation.Simulated(); !ok { + return &ValidationError{Name: "simulated", err: errors.New(`ent: missing required field "Decision.simulated"`)} + } + return nil +} + +func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { + var ( + _node = &Decision{config: dc.config} + _spec = &sqlgraph.CreateSpec{ + Table: decision.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + } + ) + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + _node.CreatedAt = &value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + _node.UpdatedAt = &value + } + if value, ok := dc.mutation.Until(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + _node.Until = &value + } + if value, ok := dc.mutation.Scenario(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + _node.Scenario = value + } + if value, ok := dc.mutation.GetType(); ok { + _spec.Fields = append(_spec.Fields, 
&sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + _node.Type = value + } + if value, ok := dc.mutation.StartIP(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + _node.StartIP = value + } + if value, ok := dc.mutation.EndIP(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + _node.EndIP = value + } + if value, ok := dc.mutation.StartSuffix(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartSuffix, + }) + _node.StartSuffix = value + } + if value, ok := dc.mutation.EndSuffix(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndSuffix, + }) + _node.EndSuffix = value + } + if value, ok := dc.mutation.IPSize(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldIPSize, + }) + _node.IPSize = value + } + if value, ok := dc.mutation.Scope(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + _node.Scope = value + } + if value, ok := dc.mutation.Value(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + _node.Value = value + } + if value, ok := dc.mutation.Origin(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + _node.Origin = value + } + if value, ok := dc.mutation.Simulated(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + _node.Simulated = value + } + if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.alert_decisions = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DecisionCreateBulk is the builder for creating many Decision entities in bulk. +type DecisionCreateBulk struct { + config + builders []*DecisionCreate +} + +// Save creates the Decision entities in the database. 
+func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { + specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) + nodes := make([]*Decision, len(dcb.builders)) + mutators := make([]Mutator, len(dcb.builders)) + for i := range dcb.builders { + func(i int, root context.Context) { + builder := dcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dcb *DecisionCreateBulk) SaveX(ctx context.Context) []*Decision { + v, err := dcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dcb *DecisionCreateBulk) Exec(ctx context.Context) error { + _, err := dcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcb *DecisionCreateBulk) ExecX(ctx context.Context) { + if err := dcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/decision_delete.go b/pkg/database/ent/decision_delete.go new file mode 100644 index 0000000..24b494b --- /dev/null +++ b/pkg/database/ent/decision_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// DecisionDelete is the builder for deleting a Decision entity. +type DecisionDelete struct { + config + hooks []Hook + mutation *DecisionMutation +} + +// Where appends a list predicates to the DecisionDelete builder. +func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete { + dd.mutation.Where(ps...) + return dd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(dd.hooks) == 0 { + affected, err = dd.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + dd.mutation = mutation + affected, err = dd.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(dd.hooks) - 1; i >= 0; i-- { + if dd.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = dd.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, dd.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dd *DecisionDelete) ExecX(ctx context.Context) int { + n, err := dd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + if ps := dd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// DecisionDeleteOne is the builder for deleting a single Decision entity. +type DecisionDeleteOne struct { + dd *DecisionDelete +} + +// Exec executes the deletion query. +func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { + n, err := ddo.dd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{decision.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ddo *DecisionDeleteOne) ExecX(ctx context.Context) { + ddo.dd.ExecX(ctx) +} diff --git a/pkg/database/ent/decision_query.go b/pkg/database/ent/decision_query.go new file mode 100644 index 0000000..4148272 --- /dev/null +++ b/pkg/database/ent/decision_query.go @@ -0,0 +1,613 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// DecisionQuery is the builder for querying Decision entities. +type DecisionQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Decision + withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DecisionQuery builder. +func (dq *DecisionQuery) Where(ps ...predicate.Decision) *DecisionQuery { + dq.predicates = append(dq.predicates, ps...) + return dq +} + +// Limit adds a limit step to the query. +func (dq *DecisionQuery) Limit(limit int) *DecisionQuery { + dq.limit = &limit + return dq +} + +// Offset adds an offset step to the query. 
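
// A sketch of a predicate-scoped bulk delete with the builder above;
// decision.ScopeEQ is assumed to be among the generated predicates in
// pkg/database/ent/decision:
//
//	n, err := client.Decision.Delete().
//		Where(decision.ScopeEQ("Ip")).
//		Exec(ctx) // n is the number of rows deleted
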
+func (dq *DecisionQuery) Offset(offset int) *DecisionQuery { + dq.offset = &offset + return dq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery { + dq.unique = &unique + return dq +} + +// Order adds an order step to the query. +func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryOwner chains the current query on the "owner" edge. +func (dq *DecisionQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: dq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(decision.Table, decision.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, decision.OwnerTable, decision.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Decision entity from the query. +// Returns a *NotFoundError when no Decision was found. +func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) { + nodes, err := dq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{decision.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Decision ID from the query. +// Returns a *NotFoundError when no Decision ID was found. +func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{decision.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dq *DecisionQuery) FirstIDX(ctx context.Context) int { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Decision entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Decision entity is found. +// Returns a *NotFoundError when no Decision entities are found. +func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) { + nodes, err := dq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{decision.Label} + default: + return nil, &NotSingularError{decision.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Decision ID in the query. +// Returns a *NotSingularError when more than one Decision ID is found. +// Returns a *NotFoundError when no entities are found. 
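
// A sketch contrasting the fetch helpers above, assuming a generated
// decision.ValueEQ predicate and an initialized client:
//
//	d, err := client.Decision.Query().
//		Where(decision.ValueEQ("192.0.2.1")).
//		Only(ctx)
//	if ent.IsNotFound(err) { /* no matching decision */ }
//
// Only returns a *NotSingularError when two or more rows match, while First
// simply takes the first result. The owner edge can be eager-loaded in the
// same query:
//
//	ds, _ := client.Decision.Query().WithOwner().All(ctx)
//	for _, d := range ds {
//		_ = d.Edges.Owner // *Alert; nil unless WithOwner was requested
//	}
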
+func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{decision.Label} + default: + err = &NotSingularError{decision.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Decisions. +func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DecisionQuery) AllX(ctx context.Context) []*Decision { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Decision IDs. +func (dq *DecisionQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DecisionQuery) IDsX(ctx context.Context) []int { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DecisionQuery) Count(ctx context.Context) (int, error) { + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return dq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (dq *DecisionQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) { + if err := dq.prepareQuery(ctx); err != nil { + return false, err + } + return dq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DecisionQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DecisionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DecisionQuery) Clone() *DecisionQuery { + if dq == nil { + return nil + } + return &DecisionQuery{ + config: dq.config, + limit: dq.limit, + offset: dq.offset, + order: append([]OrderFunc{}, dq.order...), + predicates: append([]predicate.Decision{}, dq.predicates...), + withOwner: dq.withOwner.Clone(), + // clone intermediate query. + sql: dq.sql.Clone(), + path: dq.path, + unique: dq.unique, + } +} + +// WithOwner tells the query-builder to eager-load the nodes that are connected to +// the "owner" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { + query := &AlertQuery{config: dq.config} + for _, opt := range opts { + opt(query) + } + dq.withOwner = query + return dq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Decision.Query(). +// GroupBy(decision.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupBy { + grbuild := &DecisionGroupBy{config: dq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + return dq.sqlQuery(ctx), nil + } + grbuild.label = decision.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection of one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Decision.Query(). +// Select(decision.FieldCreatedAt). +// Scan(ctx, &v) +func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect { + dq.fields = append(dq.fields, fields...) + selbuild := &DecisionSelect{DecisionQuery: dq} + selbuild.label = decision.Label + selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan + return selbuild +} + +func (dq *DecisionQuery) prepareQuery(ctx context.Context) error { + for _, f := range dq.fields { + if !decision.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DecisionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Decision, error) { + var ( + nodes = []*Decision{} + withFKs = dq.withFKs + _spec = dq.querySpec() + loadedTypes = [1]bool{ + dq.withOwner != nil, + } + ) + if dq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, decision.ForeignKeys...)
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Decision).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Decision{config: dq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dq.withOwner; query != nil { + if err := dq.loadOwner(ctx, query, nodes, nil, + func(n *Decision, e *Alert) { n.Edges.Owner = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*Decision, init func(*Decision), assign func(*Decision, *Alert)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Decision) + for i := range nodes { + if nodes[i].alert_decisions == nil { + continue + } + fk := *nodes[i].alert_decisions + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + _spec.Node.Columns = dq.fields + if len(dq.fields) > 0 { + _spec.Unique = dq.unique != nil && *dq.unique + } + return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DecisionQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + From: dq.sql, + Unique: true, + } + if unique := dq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := dq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID) + for i := range fields { + if fields[i] != decision.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(decision.Table) + columns := dq.fields + if len(columns) == 0 { + columns = decision.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + 
selector.Select(selector.Columns(columns...)...) + } + if dq.unique != nil && *dq.unique { + selector.Distinct() + } + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector) + } + if offset := dq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DecisionGroupBy is the group-by builder for Decision entities. +type DecisionGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the group-by query and scans the result into the given value. +func (dgb *DecisionGroupBy) Scan(ctx context.Context, v any) error { + query, err := dgb.path(ctx) + if err != nil { + return err + } + dgb.sql = query + return dgb.sqlScan(ctx, v) +} + +func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range dgb.fields { + if !decision.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := dgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector { + selector := dgb.sql.Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) + for _, f := range dgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(dgb.fields...)...) +} + +// DecisionSelect is the builder for selecting fields of Decision entities. +type DecisionSelect struct { + *DecisionQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (ds *DecisionSelect) Scan(ctx context.Context, v any) error { + if err := ds.prepareQuery(ctx); err != nil { + return err + } + ds.sql = ds.DecisionQuery.sqlQuery(ctx) + return ds.sqlScan(ctx, v) +} + +func (ds *DecisionSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := ds.sql.Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go new file mode 100644 index 0000000..87b9fd2 --- /dev/null +++ b/pkg/database/ent/decision_update.go @@ -0,0 +1,1201 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// DecisionUpdate is the builder for updating Decision entities. +type DecisionUpdate struct { + config + hooks []Hook + mutation *DecisionMutation +} + +// Where appends a list of predicates to the DecisionUpdate builder. +func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate { + du.mutation.Where(ps...) + return du +} + +// SetCreatedAt sets the "created_at" field. +func (du *DecisionUpdate) SetCreatedAt(t time.Time) *DecisionUpdate { + du.mutation.SetCreatedAt(t) + return du +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (du *DecisionUpdate) ClearCreatedAt() *DecisionUpdate { + du.mutation.ClearCreatedAt() + return du +} + +// SetUpdatedAt sets the "updated_at" field. +func (du *DecisionUpdate) SetUpdatedAt(t time.Time) *DecisionUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (du *DecisionUpdate) ClearUpdatedAt() *DecisionUpdate { + du.mutation.ClearUpdatedAt() + return du +} + +// SetUntil sets the "until" field. +func (du *DecisionUpdate) SetUntil(t time.Time) *DecisionUpdate { + du.mutation.SetUntil(t) + return du +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableUntil(t *time.Time) *DecisionUpdate { + if t != nil { + du.SetUntil(*t) + } + return du +} + +// ClearUntil clears the value of the "until" field. +func (du *DecisionUpdate) ClearUntil() *DecisionUpdate { + du.mutation.ClearUntil() + return du +} + +// SetScenario sets the "scenario" field. +func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { + du.mutation.SetScenario(s) + return du +} + +// SetType sets the "type" field. +func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { + du.mutation.SetType(s) + return du +} + +// SetStartIP sets the "start_ip" field. +func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { + du.mutation.ResetStartIP() + du.mutation.SetStartIP(i) + return du +} + +// SetNillableStartIP sets the "start_ip" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableStartIP(i *int64) *DecisionUpdate { + if i != nil { + du.SetStartIP(*i) + } + return du +} + +// AddStartIP adds i to the "start_ip" field. +func (du *DecisionUpdate) AddStartIP(i int64) *DecisionUpdate { + du.mutation.AddStartIP(i) + return du +} + +// ClearStartIP clears the value of the "start_ip" field. +func (du *DecisionUpdate) ClearStartIP() *DecisionUpdate { + du.mutation.ClearStartIP() + return du +} + +// SetEndIP sets the "end_ip" field. +func (du *DecisionUpdate) SetEndIP(i int64) *DecisionUpdate { + du.mutation.ResetEndIP() + du.mutation.SetEndIP(i) + return du +} + +// SetNillableEndIP sets the "end_ip" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableEndIP(i *int64) *DecisionUpdate { + if i != nil { + du.SetEndIP(*i) + } + return du +} + +// AddEndIP adds i to the "end_ip" field. +func (du *DecisionUpdate) AddEndIP(i int64) *DecisionUpdate { + du.mutation.AddEndIP(i) + return du +} + +// ClearEndIP clears the value of the "end_ip" field.
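
// The SetNillable* helpers above make optional inputs ergonomic — a sketch,
// where req is a hypothetical struct carrying *int64 fields decoded from an
// API payload:
//
//	upd := client.Decision.Update().
//		SetNillableStartIP(req.StartIP). // no-op when req.StartIP is nil
//		SetNillableEndIP(req.EndIP)
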
+func (du *DecisionUpdate) ClearEndIP() *DecisionUpdate { + du.mutation.ClearEndIP() + return du +} + +// SetStartSuffix sets the "start_suffix" field. +func (du *DecisionUpdate) SetStartSuffix(i int64) *DecisionUpdate { + du.mutation.ResetStartSuffix() + du.mutation.SetStartSuffix(i) + return du +} + +// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableStartSuffix(i *int64) *DecisionUpdate { + if i != nil { + du.SetStartSuffix(*i) + } + return du +} + +// AddStartSuffix adds i to the "start_suffix" field. +func (du *DecisionUpdate) AddStartSuffix(i int64) *DecisionUpdate { + du.mutation.AddStartSuffix(i) + return du +} + +// ClearStartSuffix clears the value of the "start_suffix" field. +func (du *DecisionUpdate) ClearStartSuffix() *DecisionUpdate { + du.mutation.ClearStartSuffix() + return du +} + +// SetEndSuffix sets the "end_suffix" field. +func (du *DecisionUpdate) SetEndSuffix(i int64) *DecisionUpdate { + du.mutation.ResetEndSuffix() + du.mutation.SetEndSuffix(i) + return du +} + +// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableEndSuffix(i *int64) *DecisionUpdate { + if i != nil { + du.SetEndSuffix(*i) + } + return du +} + +// AddEndSuffix adds i to the "end_suffix" field. +func (du *DecisionUpdate) AddEndSuffix(i int64) *DecisionUpdate { + du.mutation.AddEndSuffix(i) + return du +} + +// ClearEndSuffix clears the value of the "end_suffix" field. +func (du *DecisionUpdate) ClearEndSuffix() *DecisionUpdate { + du.mutation.ClearEndSuffix() + return du +} + +// SetIPSize sets the "ip_size" field. +func (du *DecisionUpdate) SetIPSize(i int64) *DecisionUpdate { + du.mutation.ResetIPSize() + du.mutation.SetIPSize(i) + return du +} + +// SetNillableIPSize sets the "ip_size" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableIPSize(i *int64) *DecisionUpdate { + if i != nil { + du.SetIPSize(*i) + } + return du +} + +// AddIPSize adds i to the "ip_size" field. +func (du *DecisionUpdate) AddIPSize(i int64) *DecisionUpdate { + du.mutation.AddIPSize(i) + return du +} + +// ClearIPSize clears the value of the "ip_size" field. +func (du *DecisionUpdate) ClearIPSize() *DecisionUpdate { + du.mutation.ClearIPSize() + return du +} + +// SetScope sets the "scope" field. +func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { + du.mutation.SetScope(s) + return du +} + +// SetValue sets the "value" field. +func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { + du.mutation.SetValue(s) + return du +} + +// SetOrigin sets the "origin" field. +func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { + du.mutation.SetOrigin(s) + return du +} + +// SetSimulated sets the "simulated" field. +func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { + du.mutation.SetSimulated(b) + return du +} + +// SetNillableSimulated sets the "simulated" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableSimulated(b *bool) *DecisionUpdate { + if b != nil { + du.SetSimulated(*b) + } + return du +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (du *DecisionUpdate) SetOwnerID(id int) *DecisionUpdate { + du.mutation.SetOwnerID(id) + return du +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. 
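
// A sketch of attaching the M2O "owner" edge defined above; alrt is an
// assumed *ent.Alert obtained elsewhere:
//
//	err := client.Decision.Update().
//		Where(decision.ValueEQ("192.0.2.1")).
//		SetOwner(alrt).
//		Exec(ctx)
//
// ClearOwner on the same builder nulls the alert_decisions foreign key.
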
+func (du *DecisionUpdate) SetNillableOwnerID(id *int) *DecisionUpdate { + if id != nil { + du = du.SetOwnerID(*id) + } + return du +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (du *DecisionUpdate) SetOwner(a *Alert) *DecisionUpdate { + return du.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. +func (du *DecisionUpdate) Mutation() *DecisionMutation { + return du.mutation +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (du *DecisionUpdate) ClearOwner() *DecisionUpdate { + du.mutation.ClearOwner() + return du +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (du *DecisionUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + du.defaults() + if len(du.hooks) == 0 { + affected, err = du.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + du.mutation = mutation + affected, err = du.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(du.hooks) - 1; i >= 0; i-- { + if du.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = du.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, du.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DecisionUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DecisionUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DecisionUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
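
// A sketch of the full update flow, assuming decision.UntilLT is among the
// generated time predicates:
//
//	n, err := client.Decision.Update().
//		Where(decision.UntilLT(time.Now())).
//		SetSimulated(true).
//		Save(ctx) // n = number of rows updated
//
// Save first runs defaults() below, so updated_at is refreshed even when
// the caller did not set it explicitly.
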
+func (du *DecisionUpdate) defaults() { + if _, ok := du.mutation.CreatedAt(); !ok && !du.mutation.CreatedAtCleared() { + v := decision.UpdateDefaultCreatedAt() + du.mutation.SetCreatedAt(v) + } + if _, ok := du.mutation.UpdatedAt(); !ok && !du.mutation.UpdatedAtCleared() { + v := decision.UpdateDefaultUpdatedAt() + du.mutation.SetUpdatedAt(v) + } +} + +func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + if ps := du.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := du.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + } + if du.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldCreatedAt, + }) + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + } + if du.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldUpdatedAt, + }) + } + if value, ok := du.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + } + if du.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldUntil, + }) + } + if value, ok := du.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + } + if value, ok := du.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + } + if value, ok := du.mutation.StartIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if value, ok := du.mutation.AddedStartIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if du.mutation.StartIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartIP, + }) + } + if value, ok := du.mutation.EndIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if value, ok := du.mutation.AddedEndIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if du.mutation.EndIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndIP, + }) + } + if value, ok := du.mutation.StartSuffix(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: 
field.TypeInt64, + Value: value, + Column: decision.FieldStartSuffix, + }) + } + if value, ok := du.mutation.AddedStartSuffix(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartSuffix, + }) + } + if du.mutation.StartSuffixCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartSuffix, + }) + } + if value, ok := du.mutation.EndSuffix(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndSuffix, + }) + } + if value, ok := du.mutation.AddedEndSuffix(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndSuffix, + }) + } + if du.mutation.EndSuffixCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndSuffix, + }) + } + if value, ok := du.mutation.IPSize(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldIPSize, + }) + } + if value, ok := du.mutation.AddedIPSize(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldIPSize, + }) + } + if du.mutation.IPSizeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldIPSize, + }) + } + if value, ok := du.mutation.Scope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + } + if value, ok := du.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + } + if value, ok := du.mutation.Origin(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + } + if value, ok := du.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + } + if du.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{decision.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// 
DecisionUpdateOne is the builder for updating a single Decision entity. +type DecisionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DecisionMutation +} + +// SetCreatedAt sets the "created_at" field. +func (duo *DecisionUpdateOne) SetCreatedAt(t time.Time) *DecisionUpdateOne { + duo.mutation.SetCreatedAt(t) + return duo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (duo *DecisionUpdateOne) ClearCreatedAt() *DecisionUpdateOne { + duo.mutation.ClearCreatedAt() + return duo +} + +// SetUpdatedAt sets the "updated_at" field. +func (duo *DecisionUpdateOne) SetUpdatedAt(t time.Time) *DecisionUpdateOne { + duo.mutation.SetUpdatedAt(t) + return duo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (duo *DecisionUpdateOne) ClearUpdatedAt() *DecisionUpdateOne { + duo.mutation.ClearUpdatedAt() + return duo +} + +// SetUntil sets the "until" field. +func (duo *DecisionUpdateOne) SetUntil(t time.Time) *DecisionUpdateOne { + duo.mutation.SetUntil(t) + return duo +} + +// SetNillableUntil sets the "until" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableUntil(t *time.Time) *DecisionUpdateOne { + if t != nil { + duo.SetUntil(*t) + } + return duo +} + +// ClearUntil clears the value of the "until" field. +func (duo *DecisionUpdateOne) ClearUntil() *DecisionUpdateOne { + duo.mutation.ClearUntil() + return duo +} + +// SetScenario sets the "scenario" field. +func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { + duo.mutation.SetScenario(s) + return duo +} + +// SetType sets the "type" field. +func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { + duo.mutation.SetType(s) + return duo +} + +// SetStartIP sets the "start_ip" field. +func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { + duo.mutation.ResetStartIP() + duo.mutation.SetStartIP(i) + return duo +} + +// SetNillableStartIP sets the "start_ip" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableStartIP(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetStartIP(*i) + } + return duo +} + +// AddStartIP adds i to the "start_ip" field. +func (duo *DecisionUpdateOne) AddStartIP(i int64) *DecisionUpdateOne { + duo.mutation.AddStartIP(i) + return duo +} + +// ClearStartIP clears the value of the "start_ip" field. +func (duo *DecisionUpdateOne) ClearStartIP() *DecisionUpdateOne { + duo.mutation.ClearStartIP() + return duo +} + +// SetEndIP sets the "end_ip" field. +func (duo *DecisionUpdateOne) SetEndIP(i int64) *DecisionUpdateOne { + duo.mutation.ResetEndIP() + duo.mutation.SetEndIP(i) + return duo +} + +// SetNillableEndIP sets the "end_ip" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableEndIP(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetEndIP(*i) + } + return duo +} + +// AddEndIP adds i to the "end_ip" field. +func (duo *DecisionUpdateOne) AddEndIP(i int64) *DecisionUpdateOne { + duo.mutation.AddEndIP(i) + return duo +} + +// ClearEndIP clears the value of the "end_ip" field. +func (duo *DecisionUpdateOne) ClearEndIP() *DecisionUpdateOne { + duo.mutation.ClearEndIP() + return duo +} + +// SetStartSuffix sets the "start_suffix" field. +func (duo *DecisionUpdateOne) SetStartSuffix(i int64) *DecisionUpdateOne { + duo.mutation.ResetStartSuffix() + duo.mutation.SetStartSuffix(i) + return duo +} + +// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. 
+func (duo *DecisionUpdateOne) SetNillableStartSuffix(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetStartSuffix(*i) + } + return duo +} + +// AddStartSuffix adds i to the "start_suffix" field. +func (duo *DecisionUpdateOne) AddStartSuffix(i int64) *DecisionUpdateOne { + duo.mutation.AddStartSuffix(i) + return duo +} + +// ClearStartSuffix clears the value of the "start_suffix" field. +func (duo *DecisionUpdateOne) ClearStartSuffix() *DecisionUpdateOne { + duo.mutation.ClearStartSuffix() + return duo +} + +// SetEndSuffix sets the "end_suffix" field. +func (duo *DecisionUpdateOne) SetEndSuffix(i int64) *DecisionUpdateOne { + duo.mutation.ResetEndSuffix() + duo.mutation.SetEndSuffix(i) + return duo +} + +// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableEndSuffix(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetEndSuffix(*i) + } + return duo +} + +// AddEndSuffix adds i to the "end_suffix" field. +func (duo *DecisionUpdateOne) AddEndSuffix(i int64) *DecisionUpdateOne { + duo.mutation.AddEndSuffix(i) + return duo +} + +// ClearEndSuffix clears the value of the "end_suffix" field. +func (duo *DecisionUpdateOne) ClearEndSuffix() *DecisionUpdateOne { + duo.mutation.ClearEndSuffix() + return duo +} + +// SetIPSize sets the "ip_size" field. +func (duo *DecisionUpdateOne) SetIPSize(i int64) *DecisionUpdateOne { + duo.mutation.ResetIPSize() + duo.mutation.SetIPSize(i) + return duo +} + +// SetNillableIPSize sets the "ip_size" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableIPSize(i *int64) *DecisionUpdateOne { + if i != nil { + duo.SetIPSize(*i) + } + return duo +} + +// AddIPSize adds i to the "ip_size" field. +func (duo *DecisionUpdateOne) AddIPSize(i int64) *DecisionUpdateOne { + duo.mutation.AddIPSize(i) + return duo +} + +// ClearIPSize clears the value of the "ip_size" field. +func (duo *DecisionUpdateOne) ClearIPSize() *DecisionUpdateOne { + duo.mutation.ClearIPSize() + return duo +} + +// SetScope sets the "scope" field. +func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { + duo.mutation.SetScope(s) + return duo +} + +// SetValue sets the "value" field. +func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { + duo.mutation.SetValue(s) + return duo +} + +// SetOrigin sets the "origin" field. +func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { + duo.mutation.SetOrigin(s) + return duo +} + +// SetSimulated sets the "simulated" field. +func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { + duo.mutation.SetSimulated(b) + return duo +} + +// SetNillableSimulated sets the "simulated" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableSimulated(b *bool) *DecisionUpdateOne { + if b != nil { + duo.SetSimulated(*b) + } + return duo +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (duo *DecisionUpdateOne) SetOwnerID(id int) *DecisionUpdateOne { + duo.mutation.SetOwnerID(id) + return duo +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableOwnerID(id *int) *DecisionUpdateOne { + if id != nil { + duo = duo.SetOwnerID(*id) + } + return duo +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (duo *DecisionUpdateOne) SetOwner(a *Alert) *DecisionUpdateOne { + return duo.SetOwnerID(a.ID) +} + +// Mutation returns the DecisionMutation object of the builder. 
+func (duo *DecisionUpdateOne) Mutation() *DecisionMutation { + return duo.mutation +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne { + duo.mutation.ClearOwner() + return duo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne { + duo.fields = append([]string{field}, fields...) + return duo +} + +// Save executes the query and returns the updated Decision entity. +func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) { + var ( + err error + node *Decision + ) + duo.defaults() + if len(duo.hooks) == 0 { + node, err = duo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + duo.mutation = mutation + node, err = duo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(duo.hooks) - 1; i >= 0; i-- { + if duo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = duo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, duo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Decision) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (duo *DecisionUpdateOne) SaveX(ctx context.Context) *Decision { + node, err := duo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (duo *DecisionUpdateOne) Exec(ctx context.Context) error { + _, err := duo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (duo *DecisionUpdateOne) ExecX(ctx context.Context) { + if err := duo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
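
// A sketch of updating a single row by ID and receiving the entity back;
// UpdateOneID is the generated client helper (assumed, not shown in this
// hunk):
//
//	d, err := client.Decision.UpdateOneID(id).
//		AddIPSize(1).
//		Save(ctx)
//
// Select above can narrow which columns are scanned back into d after the
// UPDATE statement runs.
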
+func (duo *DecisionUpdateOne) defaults() { + if _, ok := duo.mutation.CreatedAt(); !ok && !duo.mutation.CreatedAtCleared() { + v := decision.UpdateDefaultCreatedAt() + duo.mutation.SetCreatedAt(v) + } + if _, ok := duo.mutation.UpdatedAt(); !ok && !duo.mutation.UpdatedAtCleared() { + v := decision.UpdateDefaultUpdatedAt() + duo.mutation.SetUpdatedAt(v) + } +} + +func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: decision.Table, + Columns: decision.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: decision.FieldID, + }, + }, + } + id, ok := duo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Decision.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := duo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID) + for _, f := range fields { + if !decision.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != decision.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := duo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := duo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldCreatedAt, + }) + } + if duo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldCreatedAt, + }) + } + if value, ok := duo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUpdatedAt, + }) + } + if duo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldUpdatedAt, + }) + } + if value, ok := duo.mutation.Until(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: decision.FieldUntil, + }) + } + if duo.mutation.UntilCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: decision.FieldUntil, + }) + } + if value, ok := duo.mutation.Scenario(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScenario, + }) + } + if value, ok := duo.mutation.GetType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldType, + }) + } + if value, ok := duo.mutation.StartIP(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if value, ok := duo.mutation.AddedStartIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartIP, + }) + } + if duo.mutation.StartIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartIP, + }) + } + if value, ok := duo.mutation.EndIP(); ok { + 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if value, ok := duo.mutation.AddedEndIP(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndIP, + }) + } + if duo.mutation.EndIPCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndIP, + }) + } + if value, ok := duo.mutation.StartSuffix(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartSuffix, + }) + } + if value, ok := duo.mutation.AddedStartSuffix(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldStartSuffix, + }) + } + if duo.mutation.StartSuffixCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldStartSuffix, + }) + } + if value, ok := duo.mutation.EndSuffix(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndSuffix, + }) + } + if value, ok := duo.mutation.AddedEndSuffix(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldEndSuffix, + }) + } + if duo.mutation.EndSuffixCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldEndSuffix, + }) + } + if value, ok := duo.mutation.IPSize(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldIPSize, + }) + } + if value, ok := duo.mutation.AddedIPSize(); ok { + _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Value: value, + Column: decision.FieldIPSize, + }) + } + if duo.mutation.IPSizeCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeInt64, + Column: decision.FieldIPSize, + }) + } + if value, ok := duo.mutation.Scope(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldScope, + }) + } + if value, ok := duo.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldValue, + }) + } + if value, ok := duo.mutation.Origin(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: decision.FieldOrigin, + }) + } + if value, ok := duo.mutation.Simulated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: decision.FieldSimulated, + }) + } + if duo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := duo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + 
Table: decision.OwnerTable, + Columns: []string{decision.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Decision{config: duo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{decision.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go new file mode 100644 index 0000000..a0b3285 --- /dev/null +++ b/pkg/database/ent/ent.go @@ -0,0 +1,475 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +// OrderFunc applies an ordering on the sql selector. +type OrderFunc func(*sql.Selector) + +// columnChecker returns a function that checks whether the given column exists in the given table. +func columnChecker(table string) func(string) error { + checks := map[string]func(string) bool{ + alert.Table: alert.ValidColumn, + bouncer.Table: bouncer.ValidColumn, + decision.Table: decision.ValidColumn, + event.Table: event.ValidColumn, + machine.Table: machine.ValidColumn, + meta.Table: meta.ValidColumn, + } + check, ok := checks[table] + if !ok { + return func(string) error { + return fmt.Errorf("unknown table %q", table) + } + } + return func(column string) error { + if !check(column) { + return fmt.Errorf("unknown column %q for table %q", column, table) + } + return nil + } +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) OrderFunc { + return func(s *sql.Selector) { + check := columnChecker(s.TableName()) + for _, f := range fields { + if err := check(f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) OrderFunc { + return func(s *sql.Selector) { + check := columnChecker(s.TableName()) + for _, f := range fields { + if err := check(f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming other aggregation functions with custom names. For example: +// +// GroupBy(field1, field2).
+// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + check := columnChecker(s.TableName()) + if err := check(field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + check := columnChecker(s.TableName()) + if err := check(field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + check := columnChecker(s.TableName()) + if err := check(field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + check := columnChecker(s.TableName()) + if err := check(field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks a not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error.
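
// A sketch combining the aggregation helpers above with the generated
// Decision group-by and select builders:
//
//	var rows []struct {
//		Scope string `json:"scope"`
//		Max   int64  `json:"max"`
//	}
//	err := client.Decision.Query().
//		GroupBy(decision.FieldScope).
//		Aggregate(ent.As(ent.Max(decision.FieldIPSize), "max")).
//		Scan(ctx, &rows)
//
// The single-column helpers defined next (Strings, Ints, ...) shortcut the
// same scan when exactly one field is selected:
//
//	values, err := client.Decision.Query().
//		Select(decision.FieldValue).
//		Strings(ctx)
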
+func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. 
It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/pkg/database/ent/enttest/enttest.go b/pkg/database/ent/enttest/enttest.go new file mode 100644 index 0000000..ccfd020 --- /dev/null +++ b/pkg/database/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + // required by schema hooks. 
+ _ "github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime" + + "entgo.io/ent/dialect/sql/schema" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go new file mode 100644 index 0000000..276d091 --- /dev/null +++ b/pkg/database/ent/event.go @@ -0,0 +1,182 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" +) + +// Event is the model entity for the Event schema. +type Event struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt *time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt *time.Time `json:"updated_at,omitempty"` + // Time holds the value of the "time" field. + Time time.Time `json:"time,omitempty"` + // Serialized holds the value of the "serialized" field. + Serialized string `json:"serialized,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the EventQuery when eager-loading is set. + Edges EventEdges `json:"edges"` + alert_events *int +} + +// EventEdges holds the relations/edges for other nodes in the graph. +type EventEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert `json:"owner,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
+func (e EventEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Event) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case event.FieldID: + values[i] = new(sql.NullInt64) + case event.FieldSerialized: + values[i] = new(sql.NullString) + case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime: + values[i] = new(sql.NullTime) + case event.ForeignKeys[0]: // alert_events + values[i] = new(sql.NullInt64) + default: + return nil, fmt.Errorf("unexpected column %q for type Event", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Event fields. +func (e *Event) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case event.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + e.ID = int(value.Int64) + case event.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + e.CreatedAt = new(time.Time) + *e.CreatedAt = value.Time + } + case event.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + e.UpdatedAt = new(time.Time) + *e.UpdatedAt = value.Time + } + case event.FieldTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field time", values[i]) + } else if value.Valid { + e.Time = value.Time + } + case event.FieldSerialized: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field serialized", values[i]) + } else if value.Valid { + e.Serialized = value.String + } + case event.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field alert_events", value) + } else if value.Valid { + e.alert_events = new(int) + *e.alert_events = int(value.Int64) + } + } + } + return nil +} + +// QueryOwner queries the "owner" edge of the Event entity. +func (e *Event) QueryOwner() *AlertQuery { + return (&EventClient{config: e.config}).QueryOwner(e) +} + +// Update returns a builder for updating this Event. +// Note that you need to call Event.Unwrap() before calling this method if this Event +// was returned from a transaction, and the transaction was committed or rolled back. +func (e *Event) Update() *EventUpdateOne { + return (&EventClient{config: e.config}).UpdateOne(e) +} + +// Unwrap unwraps the Event entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (e *Event) Unwrap() *Event { + _tx, ok := e.config.driver.(*txDriver) + if !ok { + panic("ent: Event is not a transactional entity") + } + e.config.driver = _tx.drv + return e +} + +// String implements the fmt.Stringer. 
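+// For example, fmt.Println(e) produces a single line shaped like
+// "Event(id=1, created_at=..., updated_at=..., time=..., serialized=...)",
+// with the timestamps rendered in time.ANSIC format.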
+func (e *Event) String() string { + var builder strings.Builder + builder.WriteString("Event(") + builder.WriteString(fmt.Sprintf("id=%v, ", e.ID)) + if v := e.CreatedAt; v != nil { + builder.WriteString("created_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := e.UpdatedAt; v != nil { + builder.WriteString("updated_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("time=") + builder.WriteString(e.Time.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("serialized=") + builder.WriteString(e.Serialized) + builder.WriteByte(')') + return builder.String() +} + +// Events is a parsable slice of Event. +type Events []*Event + +func (e Events) config(cfg config) { + for _i := range e { + e[_i].config = cfg + } +} diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go new file mode 100644 index 0000000..07e58f1 --- /dev/null +++ b/pkg/database/ent/event/event.go @@ -0,0 +1,76 @@ +// Code generated by ent, DO NOT EDIT. + +package event + +import ( + "time" +) + +const ( + // Label holds the string label denoting the event type in the database. + Label = "event" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldTime holds the string denoting the time field in the database. + FieldTime = "time" + // FieldSerialized holds the string denoting the serialized field in the database. + FieldSerialized = "serialized" + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + // Table holds the table name of the event in the database. + Table = "events" + // OwnerTable is the table that holds the owner relation/edge. + OwnerTable = "events" + // OwnerInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + OwnerInverseTable = "alerts" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "alert_events" +) + +// Columns holds all SQL columns for event fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldTime, + FieldSerialized, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "events" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "alert_events", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. + UpdateDefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // SerializedValidator is a validator for the "serialized" field. 
It is called by the builders before save. + SerializedValidator func(string) error +) diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go new file mode 100644 index 0000000..322b6f4 --- /dev/null +++ b/pkg/database/ent/event/where.go @@ -0,0 +1,489 @@ +// Code generated by ent, DO NOT EDIT. + +package event + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Time applies equality check predicate on the "time" field. It's identical to TimeEQ. +func Time(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTime), v)) + }) +} + +// Serialized applies equality check predicate on the "serialized" field. It's identical to SerializedEQ. +func Serialized(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSerialized), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
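+// Field predicates are passed to Where and are ANDed together; for example,
+// a created_at window (a sketch, assuming client/ctx and from/to times):
+//
+//	evs, err := client.Event.Query().
+//		Where(event.CreatedAtGTE(from), event.CreatedAtLT(to)).
+//		All(ctx)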
+func CreatedAtEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCreatedAt))) + }) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCreatedAt))) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. 
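+// The nillable timestamp fields can also be matched against NULL; for
+// example (a sketch):
+//
+//	n, err := client.Event.Query().Where(event.UpdatedAtIsNil()).Count(ctx)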
+func UpdatedAtGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUpdatedAt))) + }) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUpdatedAt))) + }) +} + +// TimeEQ applies the EQ predicate on the "time" field. +func TimeEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldTime), v)) + }) +} + +// TimeNEQ applies the NEQ predicate on the "time" field. +func TimeNEQ(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldTime), v)) + }) +} + +// TimeIn applies the In predicate on the "time" field. +func TimeIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldTime), v...)) + }) +} + +// TimeNotIn applies the NotIn predicate on the "time" field. +func TimeNotIn(vs ...time.Time) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldTime), v...)) + }) +} + +// TimeGT applies the GT predicate on the "time" field. +func TimeGT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldTime), v)) + }) +} + +// TimeGTE applies the GTE predicate on the "time" field. +func TimeGTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldTime), v)) + }) +} + +// TimeLT applies the LT predicate on the "time" field. +func TimeLT(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldTime), v)) + }) +} + +// TimeLTE applies the LTE predicate on the "time" field. +func TimeLTE(v time.Time) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldTime), v)) + }) +} + +// SerializedEQ applies the EQ predicate on the "serialized" field. +func SerializedEQ(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldSerialized), v)) + }) +} + +// SerializedNEQ applies the NEQ predicate on the "serialized" field. +func SerializedNEQ(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldSerialized), v)) + }) +} + +// SerializedIn applies the In predicate on the "serialized" field. 
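+// For example (a sketch; s1 and s2 are assumed serialized payloads):
+//
+//	evs, err := client.Event.Query().
+//		Where(event.SerializedIn(s1, s2)).
+//		All(ctx)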
+func SerializedIn(vs ...string) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldSerialized), v...)) + }) +} + +// SerializedNotIn applies the NotIn predicate on the "serialized" field. +func SerializedNotIn(vs ...string) predicate.Event { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldSerialized), v...)) + }) +} + +// SerializedGT applies the GT predicate on the "serialized" field. +func SerializedGT(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldSerialized), v)) + }) +} + +// SerializedGTE applies the GTE predicate on the "serialized" field. +func SerializedGTE(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldSerialized), v)) + }) +} + +// SerializedLT applies the LT predicate on the "serialized" field. +func SerializedLT(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldSerialized), v)) + }) +} + +// SerializedLTE applies the LTE predicate on the "serialized" field. +func SerializedLTE(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldSerialized), v)) + }) +} + +// SerializedContains applies the Contains predicate on the "serialized" field. +func SerializedContains(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldSerialized), v)) + }) +} + +// SerializedHasPrefix applies the HasPrefix predicate on the "serialized" field. +func SerializedHasPrefix(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldSerialized), v)) + }) +} + +// SerializedHasSuffix applies the HasSuffix predicate on the "serialized" field. +func SerializedHasSuffix(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldSerialized), v)) + }) +} + +// SerializedEqualFold applies the EqualFold predicate on the "serialized" field. +func SerializedEqualFold(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldSerialized), v)) + }) +} + +// SerializedContainsFold applies the ContainsFold predicate on the "serialized" field. +func SerializedContainsFold(v string) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldSerialized), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Event { + return predicate.Event(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). 
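+// For example, a sketch that assumes the generated alert predicate package
+// and a ScenarioEQ predicate on it:
+//
+//	evs, err := client.Event.Query().
+//		Where(event.HasOwnerWith(alert.ScenarioEQ("crowdsecurity/ssh-bf"))).
+//		All(ctx)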
+func HasOwnerWith(preds ...predicate.Alert) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Event) predicate.Event { + return predicate.Event(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go new file mode 100644 index 0000000..3d84489 --- /dev/null +++ b/pkg/database/ent/event_create.go @@ -0,0 +1,347 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" +) + +// EventCreate is the builder for creating a Event entity. +type EventCreate struct { + config + mutation *EventMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (ec *EventCreate) SetCreatedAt(t time.Time) *EventCreate { + ec.mutation.SetCreatedAt(t) + return ec +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (ec *EventCreate) SetNillableCreatedAt(t *time.Time) *EventCreate { + if t != nil { + ec.SetCreatedAt(*t) + } + return ec +} + +// SetUpdatedAt sets the "updated_at" field. +func (ec *EventCreate) SetUpdatedAt(t time.Time) *EventCreate { + ec.mutation.SetUpdatedAt(t) + return ec +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (ec *EventCreate) SetNillableUpdatedAt(t *time.Time) *EventCreate { + if t != nil { + ec.SetUpdatedAt(*t) + } + return ec +} + +// SetTime sets the "time" field. +func (ec *EventCreate) SetTime(t time.Time) *EventCreate { + ec.mutation.SetTime(t) + return ec +} + +// SetSerialized sets the "serialized" field. +func (ec *EventCreate) SetSerialized(s string) *EventCreate { + ec.mutation.SetSerialized(s) + return ec +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (ec *EventCreate) SetOwnerID(id int) *EventCreate { + ec.mutation.SetOwnerID(id) + return ec +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (ec *EventCreate) SetNillableOwnerID(id *int) *EventCreate { + if id != nil { + ec = ec.SetOwnerID(*id) + } + return ec +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (ec *EventCreate) SetOwner(a *Alert) *EventCreate { + return ec.SetOwnerID(a.ID) +} + +// Mutation returns the EventMutation object of the builder. 
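+// A full create chains the field and edge setters before Save; for example
+// (a sketch, with payload and a assumed):
+//
+//	ev, err := client.Event.Create().
+//		SetTime(time.Now()).
+//		SetSerialized(payload).
+//		SetOwner(a).
+//		Save(ctx)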
+func (ec *EventCreate) Mutation() *EventMutation { + return ec.mutation +} + +// Save creates the Event in the database. +func (ec *EventCreate) Save(ctx context.Context) (*Event, error) { + var ( + err error + node *Event + ) + ec.defaults() + if len(ec.hooks) == 0 { + if err = ec.check(); err != nil { + return nil, err + } + node, err = ec.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = ec.check(); err != nil { + return nil, err + } + ec.mutation = mutation + if node, err = ec.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(ec.hooks) - 1; i >= 0; i-- { + if ec.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = ec.hooks[i](mut) + } + v, err := mut.Mutate(ctx, ec.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Event) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (ec *EventCreate) SaveX(ctx context.Context) *Event { + v, err := ec.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ec *EventCreate) Exec(ctx context.Context) error { + _, err := ec.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ec *EventCreate) ExecX(ctx context.Context) { + if err := ec.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (ec *EventCreate) defaults() { + if _, ok := ec.mutation.CreatedAt(); !ok { + v := event.DefaultCreatedAt() + ec.mutation.SetCreatedAt(v) + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + v := event.DefaultUpdatedAt() + ec.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
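+// A failed check surfaces from Save as a *ValidationError; for example,
+// omitting the required "serialized" field (a sketch):
+//
+//	_, err := client.Event.Create().SetTime(time.Now()).Save(ctx)
+//	// ent.IsValidationError(err) reports true here.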
+func (ec *EventCreate) check() error { + if _, ok := ec.mutation.Time(); !ok { + return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "Event.time"`)} + } + if _, ok := ec.mutation.Serialized(); !ok { + return &ValidationError{Name: "serialized", err: errors.New(`ent: missing required field "Event.serialized"`)} + } + if v, ok := ec.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} + } + } + return nil +} + +func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { + _node, _spec := ec.createSpec() + if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { + var ( + _node = &Event{config: ec.config} + _spec = &sqlgraph.CreateSpec{ + Table: event.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + } + ) + if value, ok := ec.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + _node.CreatedAt = &value + } + if value, ok := ec.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + _node.UpdatedAt = &value + } + if value, ok := ec.mutation.Time(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + _node.Time = value + } + if value, ok := ec.mutation.Serialized(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + _node.Serialized = value + } + if nodes := ec.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.alert_events = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// EventCreateBulk is the builder for creating many Event entities in bulk. +type EventCreateBulk struct { + config + builders []*EventCreate +} + +// Save creates the Event entities in the database. 
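+// For example, a bulk-insert sketch (items is an assumed input slice):
+//
+//	builders := make([]*ent.EventCreate, 0, len(items))
+//	for _, it := range items {
+//		builders = append(builders, client.Event.Create().SetTime(it.At).SetSerialized(it.Raw))
+//	}
+//	evs, err := client.Event.CreateBulk(builders...).Save(ctx)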
+func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { + specs := make([]*sqlgraph.CreateSpec, len(ecb.builders)) + nodes := make([]*Event, len(ecb.builders)) + mutators := make([]Mutator, len(ecb.builders)) + for i := range ecb.builders { + func(i int, root context.Context) { + builder := ecb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ecb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ecb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ecb *EventCreateBulk) SaveX(ctx context.Context) []*Event { + v, err := ecb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ecb *EventCreateBulk) Exec(ctx context.Context) error { + _, err := ecb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ecb *EventCreateBulk) ExecX(ctx context.Context) { + if err := ecb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/event_delete.go b/pkg/database/ent/event_delete.go new file mode 100644 index 0000000..0220dc7 --- /dev/null +++ b/pkg/database/ent/event_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// EventDelete is the builder for deleting a Event entity. +type EventDelete struct { + config + hooks []Hook + mutation *EventMutation +} + +// Where appends a list predicates to the EventDelete builder. +func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete { + ed.mutation.Where(ps...) + return ed +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
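+// For example, purging old events (a sketch; cutoff is an assumed time.Time):
+//
+//	n, err := client.Event.Delete().
+//		Where(event.CreatedAtLT(cutoff)).
+//		Exec(ctx)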
+func (ed *EventDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(ed.hooks) == 0 { + affected, err = ed.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + ed.mutation = mutation + affected, err = ed.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(ed.hooks) - 1; i >= 0; i-- { + if ed.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = ed.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, ed.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ed *EventDelete) ExecX(ctx context.Context) int { + n, err := ed.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + if ps := ed.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ed.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// EventDeleteOne is the builder for deleting a single Event entity. +type EventDeleteOne struct { + ed *EventDelete +} + +// Exec executes the deletion query. +func (edo *EventDeleteOne) Exec(ctx context.Context) error { + n, err := edo.ed.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{event.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (edo *EventDeleteOne) ExecX(ctx context.Context) { + edo.ed.ExecX(ctx) +} diff --git a/pkg/database/ent/event_query.go b/pkg/database/ent/event_query.go new file mode 100644 index 0000000..49794e9 --- /dev/null +++ b/pkg/database/ent/event_query.go @@ -0,0 +1,613 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// EventQuery is the builder for querying Event entities. +type EventQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Event + withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the EventQuery builder. +func (eq *EventQuery) Where(ps ...predicate.Event) *EventQuery { + eq.predicates = append(eq.predicates, ps...) + return eq +} + +// Limit adds a limit step to the query. +func (eq *EventQuery) Limit(limit int) *EventQuery { + eq.limit = &limit + return eq +} + +// Offset adds an offset step to the query. 
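+// Limit and Offset combine for simple pagination; for example (a sketch):
+//
+//	page, err := client.Event.Query().
+//		Order(ent.Asc(event.FieldCreatedAt)).
+//		Offset(20).
+//		Limit(10).
+//		All(ctx)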
+func (eq *EventQuery) Offset(offset int) *EventQuery { + eq.offset = &offset + return eq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (eq *EventQuery) Unique(unique bool) *EventQuery { + eq.unique = &unique + return eq +} + +// Order adds an order step to the query. +func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery { + eq.order = append(eq.order, o...) + return eq +} + +// QueryOwner chains the current query on the "owner" edge. +func (eq *EventQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: eq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := eq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := eq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(event.Table, event.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, event.OwnerTable, event.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Event entity from the query. +// Returns a *NotFoundError when no Event was found. +func (eq *EventQuery) First(ctx context.Context) (*Event, error) { + nodes, err := eq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{event.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (eq *EventQuery) FirstX(ctx context.Context) *Event { + node, err := eq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Event ID from the query. +// Returns a *NotFoundError when no Event ID was found. +func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = eq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{event.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (eq *EventQuery) FirstIDX(ctx context.Context) int { + id, err := eq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Event entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Event entity is found. +// Returns a *NotFoundError when no Event entities are found. +func (eq *EventQuery) Only(ctx context.Context) (*Event, error) { + nodes, err := eq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{event.Label} + default: + return nil, &NotSingularError{event.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (eq *EventQuery) OnlyX(ctx context.Context) *Event { + node, err := eq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Event ID in the query. +// Returns a *NotSingularError when more than one Event ID is found. +// Returns a *NotFoundError when no entities are found. 
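+// For example (a sketch; s is an assumed serialized payload):
+//
+//	id, err := client.Event.Query().
+//		Where(event.SerializedEQ(s)).
+//		OnlyID(ctx)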
+func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = eq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{event.Label} + default: + err = &NotSingularError{event.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (eq *EventQuery) OnlyIDX(ctx context.Context) int { + id, err := eq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Events. +func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) { + if err := eq.prepareQuery(ctx); err != nil { + return nil, err + } + return eq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (eq *EventQuery) AllX(ctx context.Context) []*Event { + nodes, err := eq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Event IDs. +func (eq *EventQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (eq *EventQuery) IDsX(ctx context.Context) []int { + ids, err := eq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (eq *EventQuery) Count(ctx context.Context) (int, error) { + if err := eq.prepareQuery(ctx); err != nil { + return 0, err + } + return eq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (eq *EventQuery) CountX(ctx context.Context) int { + count, err := eq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (eq *EventQuery) Exist(ctx context.Context) (bool, error) { + if err := eq.prepareQuery(ctx); err != nil { + return false, err + } + return eq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (eq *EventQuery) ExistX(ctx context.Context) bool { + exist, err := eq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the EventQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (eq *EventQuery) Clone() *EventQuery { + if eq == nil { + return nil + } + return &EventQuery{ + config: eq.config, + limit: eq.limit, + offset: eq.offset, + order: append([]OrderFunc{}, eq.order...), + predicates: append([]predicate.Event{}, eq.predicates...), + withOwner: eq.withOwner.Clone(), + // clone intermediate query. + sql: eq.sql.Clone(), + path: eq.path, + unique: eq.unique, + } +} + +// WithOwner tells the query-builder to eager-load the nodes that are connected to +// the "owner" edge. The optional arguments are used to configure the query builder of the edge. +func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery { + query := &AlertQuery{config: eq.config} + for _, opt := range opts { + opt(query) + } + eq.withOwner = query + return eq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Event.Query(). +// GroupBy(event.FieldCreatedAt). 
+// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy { + grbuild := &EventGroupBy{config: eq.config} + grbuild.fields = append([]string{field}, fields...) + grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := eq.prepareQuery(ctx); err != nil { + return nil, err + } + return eq.sqlQuery(ctx), nil + } + grbuild.label = event.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Event.Query(). +// Select(event.FieldCreatedAt). +// Scan(ctx, &v) +func (eq *EventQuery) Select(fields ...string) *EventSelect { + eq.fields = append(eq.fields, fields...) + selbuild := &EventSelect{EventQuery: eq} + selbuild.label = event.Label + selbuild.flds, selbuild.scan = &eq.fields, selbuild.Scan + return selbuild +} + +func (eq *EventQuery) prepareQuery(ctx context.Context) error { + for _, f := range eq.fields { + if !event.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if eq.path != nil { + prev, err := eq.path(ctx) + if err != nil { + return err + } + eq.sql = prev + } + return nil +} + +func (eq *EventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Event, error) { + var ( + nodes = []*Event{} + withFKs = eq.withFKs + _spec = eq.querySpec() + loadedTypes = [1]bool{ + eq.withOwner != nil, + } + ) + if eq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, event.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Event).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Event{config: eq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := eq.withOwner; query != nil { + if err := eq.loadOwner(ctx, query, nodes, nil, + func(n *Event, e *Alert) { n.Edges.Owner = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*Event, init func(*Event), assign func(*Event, *Alert)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Event) + for i := range nodes { + if nodes[i].alert_events == nil { + continue + } + fk := *nodes[i].alert_events + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) { + _spec := eq.querySpec() + _spec.Node.Columns = eq.fields + if len(eq.fields) > 0 { + _spec.Unique = eq.unique != nil && *eq.unique + } + return sqlgraph.CountNodes(ctx, eq.driver, _spec) +} + +func (eq *EventQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := eq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + From: eq.sql, + Unique: true, + } + if unique := eq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := eq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, event.FieldID) + for i := range fields { + if fields[i] != event.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := eq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := eq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := eq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := eq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(eq.driver.Dialect()) + t1 := builder.Table(event.Table) + columns := eq.fields + if len(columns) == 0 { + columns = event.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if eq.sql != nil { + selector = eq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if eq.unique != nil && *eq.unique { + selector.Distinct() + } + for _, p := range eq.predicates { + p(selector) + } + for _, p := range eq.order { + p(selector) + } + if offset := eq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := eq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// EventGroupBy is the group-by builder for Event entities. +type EventGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy { + egb.fns = append(egb.fns, fns...) + return egb +} + +// Scan applies the group-by query and scans the result into the given value. +func (egb *EventGroupBy) Scan(ctx context.Context, v any) error { + query, err := egb.path(ctx) + if err != nil { + return err + } + egb.sql = query + return egb.sqlScan(ctx, v) +} + +func (egb *EventGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range egb.fields { + if !event.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := egb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := egb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (egb *EventGroupBy) sqlQuery() *sql.Selector { + selector := egb.sql.Select() + aggregation := make([]string, 0, len(egb.fns)) + for _, fn := range egb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(egb.fields)+len(egb.fns)) + for _, f := range egb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(egb.fields...)...) +} + +// EventSelect is the builder for selecting fields of Event entities. +type EventSelect struct { + *EventQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (es *EventSelect) Scan(ctx context.Context, v any) error { + if err := es.prepareQuery(ctx); err != nil { + return err + } + es.sql = es.EventQuery.sqlQuery(ctx) + return es.sqlScan(ctx, v) +} + +func (es *EventSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := es.sql.Query() + if err := es.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go new file mode 100644 index 0000000..bb21074 --- /dev/null +++ b/pkg/database/ent/event_update.go @@ -0,0 +1,577 @@ +// Code generated by ent, DO NOT EDIT. 
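+//
+// Usage sketch (illustrative only, not part of the generated API surface;
+// assumes an initialized *Client, a context.Context, and the event.IDIn
+// predicate from the generated event package):
+//
+// n, err := client.Event.Update().
+// Where(event.IDIn(1, 2, 3)).
+// SetSerialized("{}").
+// Save(ctx) // n is the number of rows affected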
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
+ "github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+ "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
+)
+
+// EventUpdate is the builder for updating Event entities.
+type EventUpdate struct {
+ config
+ hooks []Hook
+ mutation *EventMutation
+}
+
+// Where appends a list of predicates to the EventUpdate builder.
+func (eu *EventUpdate) Where(ps ...predicate.Event) *EventUpdate {
+ eu.mutation.Where(ps...)
+ return eu
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (eu *EventUpdate) SetCreatedAt(t time.Time) *EventUpdate {
+ eu.mutation.SetCreatedAt(t)
+ return eu
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (eu *EventUpdate) ClearCreatedAt() *EventUpdate {
+ eu.mutation.ClearCreatedAt()
+ return eu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate {
+ eu.mutation.SetUpdatedAt(t)
+ return eu
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
+func (eu *EventUpdate) ClearUpdatedAt() *EventUpdate {
+ eu.mutation.ClearUpdatedAt()
+ return eu
+}
+
+// SetTime sets the "time" field.
+func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate {
+ eu.mutation.SetTime(t)
+ return eu
+}
+
+// SetSerialized sets the "serialized" field.
+func (eu *EventUpdate) SetSerialized(s string) *EventUpdate {
+ eu.mutation.SetSerialized(s)
+ return eu
+}
+
+// SetOwnerID sets the "owner" edge to the Alert entity by ID.
+func (eu *EventUpdate) SetOwnerID(id int) *EventUpdate {
+ eu.mutation.SetOwnerID(id)
+ return eu
+}
+
+// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil.
+func (eu *EventUpdate) SetNillableOwnerID(id *int) *EventUpdate {
+ if id != nil {
+ eu = eu.SetOwnerID(*id)
+ }
+ return eu
+}
+
+// SetOwner sets the "owner" edge to the Alert entity.
+func (eu *EventUpdate) SetOwner(a *Alert) *EventUpdate {
+ return eu.SetOwnerID(a.ID)
+}
+
+// Mutation returns the EventMutation object of the builder.
+func (eu *EventUpdate) Mutation() *EventMutation {
+ return eu.mutation
+}
+
+// ClearOwner clears the "owner" edge to the Alert entity.
+func (eu *EventUpdate) ClearOwner() *EventUpdate {
+ eu.mutation.ClearOwner()
+ return eu
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (eu *EventUpdate) Save(ctx context.Context) (int, error) {
+ var (
+ err error
+ affected int
+ )
+ eu.defaults()
+ if len(eu.hooks) == 0 {
+ if err = eu.check(); err != nil {
+ return 0, err
+ }
+ affected, err = eu.sqlSave(ctx)
+ } else {
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*EventMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err = eu.check(); err != nil {
+ return 0, err
+ }
+ eu.mutation = mutation
+ affected, err = eu.sqlSave(ctx)
+ mutation.done = true
+ return affected, err
+ })
+ for i := len(eu.hooks) - 1; i >= 0; i-- {
+ if eu.hooks[i] == nil {
+ return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+ }
+ mut = eu.hooks[i](mut)
+ }
+ if _, err := mut.Mutate(ctx, eu.mutation); err != nil {
+ return 0, err
+ }
+ }
+ return affected, err
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (eu *EventUpdate) SaveX(ctx context.Context) int { + affected, err := eu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (eu *EventUpdate) Exec(ctx context.Context) error { + _, err := eu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (eu *EventUpdate) ExecX(ctx context.Context) { + if err := eu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (eu *EventUpdate) defaults() { + if _, ok := eu.mutation.CreatedAt(); !ok && !eu.mutation.CreatedAtCleared() { + v := event.UpdateDefaultCreatedAt() + eu.mutation.SetCreatedAt(v) + } + if _, ok := eu.mutation.UpdatedAt(); !ok && !eu.mutation.UpdatedAtCleared() { + v := event.UpdateDefaultUpdatedAt() + eu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (eu *EventUpdate) check() error { + if v, ok := eu.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} + } + } + return nil +} + +func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + if ps := eu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := eu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + } + if eu.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: event.FieldCreatedAt, + }) + } + if value, ok := eu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + } + if eu.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: event.FieldUpdatedAt, + }) + } + if value, ok := eu.mutation.Time(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + } + if value, ok := eu.mutation.Serialized(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + } + if eu.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := eu.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + 
_spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, eu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{event.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// EventUpdateOne is the builder for updating a single Event entity. +type EventUpdateOne struct { + config + fields []string + hooks []Hook + mutation *EventMutation +} + +// SetCreatedAt sets the "created_at" field. +func (euo *EventUpdateOne) SetCreatedAt(t time.Time) *EventUpdateOne { + euo.mutation.SetCreatedAt(t) + return euo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (euo *EventUpdateOne) ClearCreatedAt() *EventUpdateOne { + euo.mutation.ClearCreatedAt() + return euo +} + +// SetUpdatedAt sets the "updated_at" field. +func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { + euo.mutation.SetUpdatedAt(t) + return euo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (euo *EventUpdateOne) ClearUpdatedAt() *EventUpdateOne { + euo.mutation.ClearUpdatedAt() + return euo +} + +// SetTime sets the "time" field. +func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { + euo.mutation.SetTime(t) + return euo +} + +// SetSerialized sets the "serialized" field. +func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { + euo.mutation.SetSerialized(s) + return euo +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (euo *EventUpdateOne) SetOwnerID(id int) *EventUpdateOne { + euo.mutation.SetOwnerID(id) + return euo +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (euo *EventUpdateOne) SetNillableOwnerID(id *int) *EventUpdateOne { + if id != nil { + euo = euo.SetOwnerID(*id) + } + return euo +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (euo *EventUpdateOne) SetOwner(a *Alert) *EventUpdateOne { + return euo.SetOwnerID(a.ID) +} + +// Mutation returns the EventMutation object of the builder. +func (euo *EventUpdateOne) Mutation() *EventMutation { + return euo.mutation +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne { + euo.mutation.ClearOwner() + return euo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne { + euo.fields = append([]string{field}, fields...) + return euo +} + +// Save executes the query and returns the updated Event entity. 
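+//
+// A usage sketch (illustrative only; UpdateOneID is assumed from the generated
+// EventClient, and id is a placeholder):
+//
+// ev, err := client.Event.UpdateOneID(id).
+// SetSerialized("{}").
+// Save(ctx)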
+func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) { + var ( + err error + node *Event + ) + euo.defaults() + if len(euo.hooks) == 0 { + if err = euo.check(); err != nil { + return nil, err + } + node, err = euo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = euo.check(); err != nil { + return nil, err + } + euo.mutation = mutation + node, err = euo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(euo.hooks) - 1; i >= 0; i-- { + if euo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = euo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, euo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Event) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (euo *EventUpdateOne) SaveX(ctx context.Context) *Event { + node, err := euo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (euo *EventUpdateOne) Exec(ctx context.Context) error { + _, err := euo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (euo *EventUpdateOne) ExecX(ctx context.Context) { + if err := euo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (euo *EventUpdateOne) defaults() { + if _, ok := euo.mutation.CreatedAt(); !ok && !euo.mutation.CreatedAtCleared() { + v := event.UpdateDefaultCreatedAt() + euo.mutation.SetCreatedAt(v) + } + if _, ok := euo.mutation.UpdatedAt(); !ok && !euo.mutation.UpdatedAtCleared() { + v := event.UpdateDefaultUpdatedAt() + euo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (euo *EventUpdateOne) check() error { + if v, ok := euo.mutation.Serialized(); ok { + if err := event.SerializedValidator(v); err != nil { + return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} + } + } + return nil +} + +func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: event.Table, + Columns: event.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: event.FieldID, + }, + }, + } + id, ok := euo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Event.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := euo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, event.FieldID) + for _, f := range fields { + if !event.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != event.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := euo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := euo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldCreatedAt, + }) + } + if euo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: event.FieldCreatedAt, + }) + } + if value, ok := euo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldUpdatedAt, + }) + } + if euo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: event.FieldUpdatedAt, + }) + } + if value, ok := euo.mutation.Time(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: event.FieldTime, + }) + } + if value, ok := euo.mutation.Serialized(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: event.FieldSerialized, + }) + } + if euo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := euo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: event.OwnerTable, + Columns: []string{event.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Event{config: euo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, euo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{event.Label} + } 
else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go new file mode 100644 index 0000000..9f3a916 --- /dev/null +++ b/pkg/database/ent/generate.go @@ -0,0 +1,4 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema + diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go new file mode 100644 index 0000000..f82f659 --- /dev/null +++ b/pkg/database/ent/hook/hook.go @@ -0,0 +1,265 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +// The AlertFunc type is an adapter to allow the use of ordinary +// function as Alert mutator. +type AlertFunc func(context.Context, *ent.AlertMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.AlertMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m) + } + return f(ctx, mv) +} + +// The BouncerFunc type is an adapter to allow the use of ordinary +// function as Bouncer mutator. +type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.BouncerMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) + } + return f(ctx, mv) +} + +// The DecisionFunc type is an adapter to allow the use of ordinary +// function as Decision mutator. +type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.DecisionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m) + } + return f(ctx, mv) +} + +// The EventFunc type is an adapter to allow the use of ordinary +// function as Event mutator. +type EventFunc func(context.Context, *ent.EventMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.EventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) + } + return f(ctx, mv) +} + +// The MachineFunc type is an adapter to allow the use of ordinary +// function as Machine mutator. +type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) + } + return f(ctx, mv) +} + +// The MetaFunc type is an adapter to allow the use of ordinary +// function as Meta mutator. +type MetaFunc func(context.Context, *ent.MetaMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + mv, ok := m.(*ent.MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) + } + return f(ctx, mv) +} + +// Condition is a hook condition function. 
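+//
+// For example, a minimal Condition that matches only create mutations
+// (sketch; ent.OpCreate is the ent operation constant):
+//
+// func(ctx context.Context, m ent.Mutation) bool {
+// return m.Op().Is(ent.OpCreate)
+// }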
+type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. 
+//
+// func (T) Hooks() []ent.Hook {
+// return []ent.Hook{
+// Reject(ent.Delete|ent.Update),
+// }
+// }
+func Reject(op ent.Op) ent.Hook {
+ hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
+ return On(hk, op)
+}
+
+// Chain acts as a list of hooks and is effectively immutable.
+// Once created, it will always hold the same set of hooks in the same order.
+type Chain struct {
+ hooks []ent.Hook
+}
+
+// NewChain creates a new chain of hooks.
+func NewChain(hooks ...ent.Hook) Chain {
+ return Chain{append([]ent.Hook(nil), hooks...)}
+}
+
+// Hook chains the list of hooks and returns the final hook.
+func (c Chain) Hook() ent.Hook {
+ return func(mutator ent.Mutator) ent.Mutator {
+ for i := len(c.hooks) - 1; i >= 0; i-- {
+ mutator = c.hooks[i](mutator)
+ }
+ return mutator
+ }
+}
+
+// Append extends a chain, adding the specified hooks
+// as the last ones in the mutation flow.
+func (c Chain) Append(hooks ...ent.Hook) Chain {
+ newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
+ newHooks = append(newHooks, c.hooks...)
+ newHooks = append(newHooks, hooks...)
+ return Chain{newHooks}
+}
+
+// Extend extends a chain, adding the hooks of the specified chain
+// as the last ones in the mutation flow.
+func (c Chain) Extend(chain Chain) Chain {
+ return c.Append(chain.hooks...)
+}
diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go
new file mode 100644
index 0000000..dc2b18e
--- /dev/null
+++ b/pkg/database/ent/machine.go
@@ -0,0 +1,262 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+)
+
+// Machine is the model entity for the Machine schema.
+type Machine struct {
+ config `json:"-"`
+ // ID of the ent.
+ ID int `json:"id,omitempty"`
+ // CreatedAt holds the value of the "created_at" field.
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ // UpdatedAt holds the value of the "updated_at" field.
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ // LastPush holds the value of the "last_push" field.
+ LastPush *time.Time `json:"last_push,omitempty"`
+ // LastHeartbeat holds the value of the "last_heartbeat" field.
+ LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"`
+ // MachineId holds the value of the "machineId" field.
+ MachineId string `json:"machineId,omitempty"`
+ // Password holds the value of the "password" field.
+ Password string `json:"-"`
+ // IpAddress holds the value of the "ipAddress" field.
+ IpAddress string `json:"ipAddress,omitempty"`
+ // Scenarios holds the value of the "scenarios" field.
+ Scenarios string `json:"scenarios,omitempty"`
+ // Version holds the value of the "version" field.
+ Version string `json:"version,omitempty"`
+ // IsValidated holds the value of the "isValidated" field.
+ IsValidated bool `json:"isValidated,omitempty"`
+ // Status holds the value of the "status" field.
+ Status string `json:"status,omitempty"`
+ // AuthType holds the value of the "auth_type" field.
+ AuthType string `json:"auth_type"`
+ // Edges holds the relations/edges for other nodes in the graph.
+ // The values are being populated by the MachineQuery when eager-loading is set.
+ Edges MachineEdges `json:"edges"`
+}
+
+// MachineEdges holds the relations/edges for other nodes in the graph.
+type MachineEdges struct {
+ // Alerts holds the value of the alerts edge.
+ Alerts []*Alert `json:"alerts,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AlertsOrErr returns the Alerts value or an error if the edge +// was not loaded in eager-loading. +func (e MachineEdges) AlertsOrErr() ([]*Alert, error) { + if e.loadedTypes[0] { + return e.Alerts, nil + } + return nil, &NotLoadedError{edge: "alerts"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Machine) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case machine.FieldIsValidated: + values[i] = new(sql.NullBool) + case machine.FieldID: + values[i] = new(sql.NullInt64) + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType: + values[i] = new(sql.NullString) + case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: + values[i] = new(sql.NullTime) + default: + return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Machine fields. +func (m *Machine) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case machine.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case machine.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + m.CreatedAt = new(time.Time) + *m.CreatedAt = value.Time + } + case machine.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + m.UpdatedAt = new(time.Time) + *m.UpdatedAt = value.Time + } + case machine.FieldLastPush: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_push", values[i]) + } else if value.Valid { + m.LastPush = new(time.Time) + *m.LastPush = value.Time + } + case machine.FieldLastHeartbeat: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_heartbeat", values[i]) + } else if value.Valid { + m.LastHeartbeat = new(time.Time) + *m.LastHeartbeat = value.Time + } + case machine.FieldMachineId: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field machineId", values[i]) + } else if value.Valid { + m.MachineId = value.String + } + case machine.FieldPassword: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password", values[i]) + } else if value.Valid { + m.Password = value.String + } + case machine.FieldIpAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ipAddress", values[i]) + } else if value.Valid { + m.IpAddress = value.String + } + case machine.FieldScenarios: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field 
scenarios", values[i]) + } else if value.Valid { + m.Scenarios = value.String + } + case machine.FieldVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field version", values[i]) + } else if value.Valid { + m.Version = value.String + } + case machine.FieldIsValidated: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isValidated", values[i]) + } else if value.Valid { + m.IsValidated = value.Bool + } + case machine.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + m.Status = value.String + } + case machine.FieldAuthType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field auth_type", values[i]) + } else if value.Valid { + m.AuthType = value.String + } + } + } + return nil +} + +// QueryAlerts queries the "alerts" edge of the Machine entity. +func (m *Machine) QueryAlerts() *AlertQuery { + return (&MachineClient{config: m.config}).QueryAlerts(m) +} + +// Update returns a builder for updating this Machine. +// Note that you need to call Machine.Unwrap() before calling this method if this Machine +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Machine) Update() *MachineUpdateOne { + return (&MachineClient{config: m.config}).UpdateOne(m) +} + +// Unwrap unwraps the Machine entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Machine) Unwrap() *Machine { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Machine is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Machine) String() string { + var builder strings.Builder + builder.WriteString("Machine(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + if v := m.CreatedAt; v != nil { + builder.WriteString("created_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := m.UpdatedAt; v != nil { + builder.WriteString("updated_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := m.LastPush; v != nil { + builder.WriteString("last_push=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := m.LastHeartbeat; v != nil { + builder.WriteString("last_heartbeat=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("machineId=") + builder.WriteString(m.MachineId) + builder.WriteString(", ") + builder.WriteString("password=") + builder.WriteString(", ") + builder.WriteString("ipAddress=") + builder.WriteString(m.IpAddress) + builder.WriteString(", ") + builder.WriteString("scenarios=") + builder.WriteString(m.Scenarios) + builder.WriteString(", ") + builder.WriteString("version=") + builder.WriteString(m.Version) + builder.WriteString(", ") + builder.WriteString("isValidated=") + builder.WriteString(fmt.Sprintf("%v", m.IsValidated)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(m.Status) + builder.WriteString(", ") + builder.WriteString("auth_type=") + builder.WriteString(m.AuthType) + builder.WriteByte(')') + return builder.String() +} + +// Machines is a parsable slice of Machine. 
+type Machines []*Machine + +func (m Machines) config(cfg config) { + for _i := range m { + m[_i].config = cfg + } +} diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go new file mode 100644 index 0000000..e6900dd --- /dev/null +++ b/pkg/database/ent/machine/machine.go @@ -0,0 +1,101 @@ +// Code generated by ent, DO NOT EDIT. + +package machine + +import ( + "time" +) + +const ( + // Label holds the string label denoting the machine type in the database. + Label = "machine" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldLastPush holds the string denoting the last_push field in the database. + FieldLastPush = "last_push" + // FieldLastHeartbeat holds the string denoting the last_heartbeat field in the database. + FieldLastHeartbeat = "last_heartbeat" + // FieldMachineId holds the string denoting the machineid field in the database. + FieldMachineId = "machine_id" + // FieldPassword holds the string denoting the password field in the database. + FieldPassword = "password" + // FieldIpAddress holds the string denoting the ipaddress field in the database. + FieldIpAddress = "ip_address" + // FieldScenarios holds the string denoting the scenarios field in the database. + FieldScenarios = "scenarios" + // FieldVersion holds the string denoting the version field in the database. + FieldVersion = "version" + // FieldIsValidated holds the string denoting the isvalidated field in the database. + FieldIsValidated = "is_validated" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldAuthType holds the string denoting the auth_type field in the database. + FieldAuthType = "auth_type" + // EdgeAlerts holds the string denoting the alerts edge name in mutations. + EdgeAlerts = "alerts" + // Table holds the table name of the machine in the database. + Table = "machines" + // AlertsTable is the table that holds the alerts relation/edge. + AlertsTable = "alerts" + // AlertsInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + AlertsInverseTable = "alerts" + // AlertsColumn is the table column denoting the alerts relation/edge. + AlertsColumn = "machine_alerts" +) + +// Columns holds all SQL columns for machine fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldLastPush, + FieldLastHeartbeat, + FieldMachineId, + FieldPassword, + FieldIpAddress, + FieldScenarios, + FieldVersion, + FieldIsValidated, + FieldStatus, + FieldAuthType, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. + UpdateDefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
+ UpdateDefaultUpdatedAt func() time.Time + // DefaultLastPush holds the default value on creation for the "last_push" field. + DefaultLastPush func() time.Time + // UpdateDefaultLastPush holds the default value on update for the "last_push" field. + UpdateDefaultLastPush func() time.Time + // DefaultLastHeartbeat holds the default value on creation for the "last_heartbeat" field. + DefaultLastHeartbeat func() time.Time + // UpdateDefaultLastHeartbeat holds the default value on update for the "last_heartbeat" field. + UpdateDefaultLastHeartbeat func() time.Time + // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. + ScenariosValidator func(string) error + // DefaultIsValidated holds the default value on creation for the "isValidated" field. + DefaultIsValidated bool + // DefaultAuthType holds the default value on creation for the "auth_type" field. + DefaultAuthType string +) diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go new file mode 100644 index 0000000..7d02277 --- /dev/null +++ b/pkg/database/ent/machine/where.go @@ -0,0 +1,1287 @@ +// Code generated by ent, DO NOT EDIT. + +package machine + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. 
It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// LastPush applies equality check predicate on the "last_push" field. It's identical to LastPushEQ. +func LastPush(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastPush), v)) + }) +} + +// LastHeartbeat applies equality check predicate on the "last_heartbeat" field. It's identical to LastHeartbeatEQ. +func LastHeartbeat(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) + }) +} + +// MachineId applies equality check predicate on the "machineId" field. It's identical to MachineIdEQ. +func MachineId(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMachineId), v)) + }) +} + +// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. +func Password(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPassword), v)) + }) +} + +// IpAddress applies equality check predicate on the "ipAddress" field. It's identical to IpAddressEQ. +func IpAddress(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIpAddress), v)) + }) +} + +// Scenarios applies equality check predicate on the "scenarios" field. It's identical to ScenariosEQ. +func Scenarios(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarios), v)) + }) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// IsValidated applies equality check predicate on the "isValidated" field. It's identical to IsValidatedEQ. +func IsValidated(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIsValidated), v)) + }) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStatus), v)) + }) +} + +// AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. +func AuthType(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAuthType), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
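+// For example (sketch; t1 and t2 are placeholder time.Time values):
+//
+// client.Machine.Query().
+// Where(machine.CreatedAtNotIn(t1, t2)).
+// All(ctx)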
+func CreatedAtNotIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCreatedAt))) + }) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCreatedAt))) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
+func UpdatedAtLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUpdatedAt))) + }) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUpdatedAt))) + }) +} + +// LastPushEQ applies the EQ predicate on the "last_push" field. +func LastPushEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastPush), v)) + }) +} + +// LastPushNEQ applies the NEQ predicate on the "last_push" field. +func LastPushNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLastPush), v)) + }) +} + +// LastPushIn applies the In predicate on the "last_push" field. +func LastPushIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldLastPush), v...)) + }) +} + +// LastPushNotIn applies the NotIn predicate on the "last_push" field. +func LastPushNotIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldLastPush), v...)) + }) +} + +// LastPushGT applies the GT predicate on the "last_push" field. +func LastPushGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLastPush), v)) + }) +} + +// LastPushGTE applies the GTE predicate on the "last_push" field. +func LastPushGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLastPush), v)) + }) +} + +// LastPushLT applies the LT predicate on the "last_push" field. +func LastPushLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLastPush), v)) + }) +} + +// LastPushLTE applies the LTE predicate on the "last_push" field. +func LastPushLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLastPush), v)) + }) +} + +// LastPushIsNil applies the IsNil predicate on the "last_push" field. +func LastPushIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldLastPush))) + }) +} + +// LastPushNotNil applies the NotNil predicate on the "last_push" field. +func LastPushNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldLastPush))) + }) +} + +// LastHeartbeatEQ applies the EQ predicate on the "last_heartbeat" field. +func LastHeartbeatEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatNEQ applies the NEQ predicate on the "last_heartbeat" field. +func LastHeartbeatNEQ(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatIn applies the In predicate on the "last_heartbeat" field. 
+func LastHeartbeatIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldLastHeartbeat), v...)) + }) +} + +// LastHeartbeatNotIn applies the NotIn predicate on the "last_heartbeat" field. +func LastHeartbeatNotIn(vs ...time.Time) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldLastHeartbeat), v...)) + }) +} + +// LastHeartbeatGT applies the GT predicate on the "last_heartbeat" field. +func LastHeartbeatGT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatGTE applies the GTE predicate on the "last_heartbeat" field. +func LastHeartbeatGTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatLT applies the LT predicate on the "last_heartbeat" field. +func LastHeartbeatLT(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatLTE applies the LTE predicate on the "last_heartbeat" field. +func LastHeartbeatLTE(v time.Time) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldLastHeartbeat), v)) + }) +} + +// LastHeartbeatIsNil applies the IsNil predicate on the "last_heartbeat" field. +func LastHeartbeatIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldLastHeartbeat))) + }) +} + +// LastHeartbeatNotNil applies the NotNil predicate on the "last_heartbeat" field. +func LastHeartbeatNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldLastHeartbeat))) + }) +} + +// MachineIdEQ applies the EQ predicate on the "machineId" field. +func MachineIdEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldMachineId), v)) + }) +} + +// MachineIdNEQ applies the NEQ predicate on the "machineId" field. +func MachineIdNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldMachineId), v)) + }) +} + +// MachineIdIn applies the In predicate on the "machineId" field. +func MachineIdIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldMachineId), v...)) + }) +} + +// MachineIdNotIn applies the NotIn predicate on the "machineId" field. +func MachineIdNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldMachineId), v...)) + }) +} + +// MachineIdGT applies the GT predicate on the "machineId" field. +func MachineIdGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldMachineId), v)) + }) +} + +// MachineIdGTE applies the GTE predicate on the "machineId" field. +func MachineIdGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldMachineId), v)) + }) +} + +// MachineIdLT applies the LT predicate on the "machineId" field. 
+func MachineIdLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldMachineId), v)) + }) +} + +// MachineIdLTE applies the LTE predicate on the "machineId" field. +func MachineIdLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldMachineId), v)) + }) +} + +// MachineIdContains applies the Contains predicate on the "machineId" field. +func MachineIdContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldMachineId), v)) + }) +} + +// MachineIdHasPrefix applies the HasPrefix predicate on the "machineId" field. +func MachineIdHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldMachineId), v)) + }) +} + +// MachineIdHasSuffix applies the HasSuffix predicate on the "machineId" field. +func MachineIdHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldMachineId), v)) + }) +} + +// MachineIdEqualFold applies the EqualFold predicate on the "machineId" field. +func MachineIdEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldMachineId), v)) + }) +} + +// MachineIdContainsFold applies the ContainsFold predicate on the "machineId" field. +func MachineIdContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldMachineId), v)) + }) +} + +// PasswordEQ applies the EQ predicate on the "password" field. +func PasswordEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldPassword), v)) + }) +} + +// PasswordNEQ applies the NEQ predicate on the "password" field. +func PasswordNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldPassword), v)) + }) +} + +// PasswordIn applies the In predicate on the "password" field. +func PasswordIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldPassword), v...)) + }) +} + +// PasswordNotIn applies the NotIn predicate on the "password" field. +func PasswordNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldPassword), v...)) + }) +} + +// PasswordGT applies the GT predicate on the "password" field. +func PasswordGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldPassword), v)) + }) +} + +// PasswordGTE applies the GTE predicate on the "password" field. +func PasswordGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldPassword), v)) + }) +} + +// PasswordLT applies the LT predicate on the "password" field. +func PasswordLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldPassword), v)) + }) +} + +// PasswordLTE applies the LTE predicate on the "password" field. 
+func PasswordLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldPassword), v)) + }) +} + +// PasswordContains applies the Contains predicate on the "password" field. +func PasswordContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldPassword), v)) + }) +} + +// PasswordHasPrefix applies the HasPrefix predicate on the "password" field. +func PasswordHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldPassword), v)) + }) +} + +// PasswordHasSuffix applies the HasSuffix predicate on the "password" field. +func PasswordHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldPassword), v)) + }) +} + +// PasswordEqualFold applies the EqualFold predicate on the "password" field. +func PasswordEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldPassword), v)) + }) +} + +// PasswordContainsFold applies the ContainsFold predicate on the "password" field. +func PasswordContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldPassword), v)) + }) +} + +// IpAddressEQ applies the EQ predicate on the "ipAddress" field. +func IpAddressEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressNEQ applies the NEQ predicate on the "ipAddress" field. +func IpAddressNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressIn applies the In predicate on the "ipAddress" field. +func IpAddressIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldIpAddress), v...)) + }) +} + +// IpAddressNotIn applies the NotIn predicate on the "ipAddress" field. +func IpAddressNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldIpAddress), v...)) + }) +} + +// IpAddressGT applies the GT predicate on the "ipAddress" field. +func IpAddressGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressGTE applies the GTE predicate on the "ipAddress" field. +func IpAddressGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressLT applies the LT predicate on the "ipAddress" field. +func IpAddressLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressLTE applies the LTE predicate on the "ipAddress" field. +func IpAddressLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressContains applies the Contains predicate on the "ipAddress" field. 
+func IpAddressContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressHasPrefix applies the HasPrefix predicate on the "ipAddress" field. +func IpAddressHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressHasSuffix applies the HasSuffix predicate on the "ipAddress" field. +func IpAddressHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressEqualFold applies the EqualFold predicate on the "ipAddress" field. +func IpAddressEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldIpAddress), v)) + }) +} + +// IpAddressContainsFold applies the ContainsFold predicate on the "ipAddress" field. +func IpAddressContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldIpAddress), v)) + }) +} + +// ScenariosEQ applies the EQ predicate on the "scenarios" field. +func ScenariosEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldScenarios), v)) + }) +} + +// ScenariosNEQ applies the NEQ predicate on the "scenarios" field. +func ScenariosNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldScenarios), v)) + }) +} + +// ScenariosIn applies the In predicate on the "scenarios" field. +func ScenariosIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldScenarios), v...)) + }) +} + +// ScenariosNotIn applies the NotIn predicate on the "scenarios" field. +func ScenariosNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldScenarios), v...)) + }) +} + +// ScenariosGT applies the GT predicate on the "scenarios" field. +func ScenariosGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldScenarios), v)) + }) +} + +// ScenariosGTE applies the GTE predicate on the "scenarios" field. +func ScenariosGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldScenarios), v)) + }) +} + +// ScenariosLT applies the LT predicate on the "scenarios" field. +func ScenariosLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldScenarios), v)) + }) +} + +// ScenariosLTE applies the LTE predicate on the "scenarios" field. +func ScenariosLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldScenarios), v)) + }) +} + +// ScenariosContains applies the Contains predicate on the "scenarios" field. +func ScenariosContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldScenarios), v)) + }) +} + +// ScenariosHasPrefix applies the HasPrefix predicate on the "scenarios" field. 
+func ScenariosHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldScenarios), v)) + }) +} + +// ScenariosHasSuffix applies the HasSuffix predicate on the "scenarios" field. +func ScenariosHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldScenarios), v)) + }) +} + +// ScenariosIsNil applies the IsNil predicate on the "scenarios" field. +func ScenariosIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldScenarios))) + }) +} + +// ScenariosNotNil applies the NotNil predicate on the "scenarios" field. +func ScenariosNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldScenarios))) + }) +} + +// ScenariosEqualFold applies the EqualFold predicate on the "scenarios" field. +func ScenariosEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldScenarios), v)) + }) +} + +// ScenariosContainsFold applies the ContainsFold predicate on the "scenarios" field. +func ScenariosContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldScenarios), v)) + }) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldVersion), v)) + }) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldVersion), v)) + }) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldVersion), v...)) + }) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldVersion), v...)) + }) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldVersion), v)) + }) +} + +// VersionGTE applies the GTE predicate on the "version" field. +func VersionGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldVersion), v)) + }) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldVersion), v)) + }) +} + +// VersionLTE applies the LTE predicate on the "version" field. +func VersionLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldVersion), v)) + }) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldVersion), v)) + }) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. 
+func VersionHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldVersion), v)) + }) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. +func VersionHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldVersion), v)) + }) +} + +// VersionIsNil applies the IsNil predicate on the "version" field. +func VersionIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldVersion))) + }) +} + +// VersionNotNil applies the NotNil predicate on the "version" field. +func VersionNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldVersion))) + }) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldVersion), v)) + }) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldVersion), v)) + }) +} + +// IsValidatedEQ applies the EQ predicate on the "isValidated" field. +func IsValidatedEQ(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldIsValidated), v)) + }) +} + +// IsValidatedNEQ applies the NEQ predicate on the "isValidated" field. +func IsValidatedNEQ(v bool) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldIsValidated), v)) + }) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldStatus), v)) + }) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldStatus), v)) + }) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldStatus), v...)) + }) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldStatus), v...)) + }) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldStatus), v)) + }) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldStatus), v)) + }) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldStatus), v)) + }) +} + +// StatusLTE applies the LTE predicate on the "status" field. 
+func StatusLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldStatus), v)) + }) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldStatus), v)) + }) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldStatus), v)) + }) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldStatus), v)) + }) +} + +// StatusIsNil applies the IsNil predicate on the "status" field. +func StatusIsNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldStatus))) + }) +} + +// StatusNotNil applies the NotNil predicate on the "status" field. +func StatusNotNil() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldStatus))) + }) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldStatus), v)) + }) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldStatus), v)) + }) +} + +// AuthTypeEQ applies the EQ predicate on the "auth_type" field. +func AuthTypeEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. +func AuthTypeNEQ(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeIn applies the In predicate on the "auth_type" field. +func AuthTypeIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldAuthType), v...)) + }) +} + +// AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. +func AuthTypeNotIn(vs ...string) predicate.Machine { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldAuthType), v...)) + }) +} + +// AuthTypeGT applies the GT predicate on the "auth_type" field. +func AuthTypeGT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeGTE applies the GTE predicate on the "auth_type" field. +func AuthTypeGTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeLT applies the LT predicate on the "auth_type" field. +func AuthTypeLT(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeLTE applies the LTE predicate on the "auth_type" field. 
+func AuthTypeLTE(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeContains applies the Contains predicate on the "auth_type" field. +func AuthTypeContains(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. +func AuthTypeHasPrefix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. +func AuthTypeHasSuffix(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. +func AuthTypeEqualFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldAuthType), v)) + }) +} + +// AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. +func AuthTypeContainsFold(v string) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) + }) +} + +// HasAlerts applies the HasEdge predicate on the "alerts" edge. +func HasAlerts() predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAlertsWith applies the HasEdge predicate on the "alerts" edge with a given conditions (other predicates). +func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Machine) predicate.Machine { + return predicate.Machine(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go new file mode 100644 index 0000000..efe0278 --- /dev/null +++ b/pkg/database/ent/machine_create.go @@ -0,0 +1,535 @@ +// Code generated by ent, DO NOT EDIT. 
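A hedged usage sketch, not part of the imported source: the predicate helpers above (field comparisons, string matching, NULL checks, and the And/Or/Not combinators) are meant to be handed to the query, update and delete builders generated in the files that follow. The snippet assumes a connected *ent.Client named client obtained elsewhere; the cutoff and field values are illustrative.

package example

import (
	"context"
	"time"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// staleValidatedMachines composes the generated predicates: validated
// machines whose heartbeat is older than the cutoff, or missing entirely.
// client is assumed to be a connected *ent.Client.
func staleValidatedMachines(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	cutoff := time.Now().Add(-30 * time.Minute) // illustrative staleness window
	return client.Machine.Query().
		Where(machine.And(
			machine.IsValidatedEQ(true),
			machine.Or(
				machine.LastHeartbeatLTE(cutoff),
				machine.LastHeartbeatIsNil(),
			),
		)).
		All(ctx)
}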
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" +) + +// MachineCreate is the builder for creating a Machine entity. +type MachineCreate struct { + config + mutation *MachineMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (mc *MachineCreate) SetCreatedAt(t time.Time) *MachineCreate { + mc.mutation.SetCreatedAt(t) + return mc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mc *MachineCreate) SetNillableCreatedAt(t *time.Time) *MachineCreate { + if t != nil { + mc.SetCreatedAt(*t) + } + return mc +} + +// SetUpdatedAt sets the "updated_at" field. +func (mc *MachineCreate) SetUpdatedAt(t time.Time) *MachineCreate { + mc.mutation.SetUpdatedAt(t) + return mc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (mc *MachineCreate) SetNillableUpdatedAt(t *time.Time) *MachineCreate { + if t != nil { + mc.SetUpdatedAt(*t) + } + return mc +} + +// SetLastPush sets the "last_push" field. +func (mc *MachineCreate) SetLastPush(t time.Time) *MachineCreate { + mc.mutation.SetLastPush(t) + return mc +} + +// SetNillableLastPush sets the "last_push" field if the given value is not nil. +func (mc *MachineCreate) SetNillableLastPush(t *time.Time) *MachineCreate { + if t != nil { + mc.SetLastPush(*t) + } + return mc +} + +// SetLastHeartbeat sets the "last_heartbeat" field. +func (mc *MachineCreate) SetLastHeartbeat(t time.Time) *MachineCreate { + mc.mutation.SetLastHeartbeat(t) + return mc +} + +// SetNillableLastHeartbeat sets the "last_heartbeat" field if the given value is not nil. +func (mc *MachineCreate) SetNillableLastHeartbeat(t *time.Time) *MachineCreate { + if t != nil { + mc.SetLastHeartbeat(*t) + } + return mc +} + +// SetMachineId sets the "machineId" field. +func (mc *MachineCreate) SetMachineId(s string) *MachineCreate { + mc.mutation.SetMachineId(s) + return mc +} + +// SetPassword sets the "password" field. +func (mc *MachineCreate) SetPassword(s string) *MachineCreate { + mc.mutation.SetPassword(s) + return mc +} + +// SetIpAddress sets the "ipAddress" field. +func (mc *MachineCreate) SetIpAddress(s string) *MachineCreate { + mc.mutation.SetIpAddress(s) + return mc +} + +// SetScenarios sets the "scenarios" field. +func (mc *MachineCreate) SetScenarios(s string) *MachineCreate { + mc.mutation.SetScenarios(s) + return mc +} + +// SetNillableScenarios sets the "scenarios" field if the given value is not nil. +func (mc *MachineCreate) SetNillableScenarios(s *string) *MachineCreate { + if s != nil { + mc.SetScenarios(*s) + } + return mc +} + +// SetVersion sets the "version" field. +func (mc *MachineCreate) SetVersion(s string) *MachineCreate { + mc.mutation.SetVersion(s) + return mc +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (mc *MachineCreate) SetNillableVersion(s *string) *MachineCreate { + if s != nil { + mc.SetVersion(*s) + } + return mc +} + +// SetIsValidated sets the "isValidated" field. +func (mc *MachineCreate) SetIsValidated(b bool) *MachineCreate { + mc.mutation.SetIsValidated(b) + return mc +} + +// SetNillableIsValidated sets the "isValidated" field if the given value is not nil. 
+func (mc *MachineCreate) SetNillableIsValidated(b *bool) *MachineCreate { + if b != nil { + mc.SetIsValidated(*b) + } + return mc +} + +// SetStatus sets the "status" field. +func (mc *MachineCreate) SetStatus(s string) *MachineCreate { + mc.mutation.SetStatus(s) + return mc +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (mc *MachineCreate) SetNillableStatus(s *string) *MachineCreate { + if s != nil { + mc.SetStatus(*s) + } + return mc +} + +// SetAuthType sets the "auth_type" field. +func (mc *MachineCreate) SetAuthType(s string) *MachineCreate { + mc.mutation.SetAuthType(s) + return mc +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (mc *MachineCreate) SetNillableAuthType(s *string) *MachineCreate { + if s != nil { + mc.SetAuthType(*s) + } + return mc +} + +// AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. +func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { + mc.mutation.AddAlertIDs(ids...) + return mc +} + +// AddAlerts adds the "alerts" edges to the Alert entity. +func (mc *MachineCreate) AddAlerts(a ...*Alert) *MachineCreate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mc.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (mc *MachineCreate) Mutation() *MachineMutation { + return mc.mutation +} + +// Save creates the Machine in the database. +func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) { + var ( + err error + node *Machine + ) + mc.defaults() + if len(mc.hooks) == 0 { + if err = mc.check(); err != nil { + return nil, err + } + node, err = mc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mc.check(); err != nil { + return nil, err + } + mc.mutation = mutation + if node, err = mc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(mc.hooks) - 1; i >= 0; i-- { + if mc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = mc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, mc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Machine) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MachineCreate) SaveX(ctx context.Context) *Machine { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MachineCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MachineCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
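Another hedged sketch, this time of the create builder above; client and all literal values are assumptions. machineId, password and ipAddress are the fields that check(), defined just below, treats as required, while omitted fields fall back to the builder's defaults().

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// registerMachine covers the three fields that check() requires; everything
// else (created_at, isValidated, auth_type, ...) comes from defaults().
func registerMachine(ctx context.Context, client *ent.Client) (*ent.Machine, error) {
	return client.Machine.Create().
		SetMachineId("test-machine").   // required
		SetPassword("<password-hash>"). // required, placeholder value
		SetIpAddress("127.0.0.1").      // required, placeholder value
		Save(ctx)
}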
+func (mc *MachineCreate) defaults() { + if _, ok := mc.mutation.CreatedAt(); !ok { + v := machine.DefaultCreatedAt() + mc.mutation.SetCreatedAt(v) + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + v := machine.DefaultUpdatedAt() + mc.mutation.SetUpdatedAt(v) + } + if _, ok := mc.mutation.LastPush(); !ok { + v := machine.DefaultLastPush() + mc.mutation.SetLastPush(v) + } + if _, ok := mc.mutation.LastHeartbeat(); !ok { + v := machine.DefaultLastHeartbeat() + mc.mutation.SetLastHeartbeat(v) + } + if _, ok := mc.mutation.IsValidated(); !ok { + v := machine.DefaultIsValidated + mc.mutation.SetIsValidated(v) + } + if _, ok := mc.mutation.AuthType(); !ok { + v := machine.DefaultAuthType + mc.mutation.SetAuthType(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mc *MachineCreate) check() error { + if _, ok := mc.mutation.MachineId(); !ok { + return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "Machine.machineId"`)} + } + if _, ok := mc.mutation.Password(); !ok { + return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "Machine.password"`)} + } + if _, ok := mc.mutation.IpAddress(); !ok { + return &ValidationError{Name: "ipAddress", err: errors.New(`ent: missing required field "Machine.ipAddress"`)} + } + if v, ok := mc.mutation.Scenarios(); ok { + if err := machine.ScenariosValidator(v); err != nil { + return &ValidationError{Name: "scenarios", err: fmt.Errorf(`ent: validator failed for field "Machine.scenarios": %w`, err)} + } + } + if _, ok := mc.mutation.IsValidated(); !ok { + return &ValidationError{Name: "isValidated", err: errors.New(`ent: missing required field "Machine.isValidated"`)} + } + if _, ok := mc.mutation.AuthType(); !ok { + return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Machine.auth_type"`)} + } + return nil +} + +func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { + var ( + _node = &Machine{config: mc.config} + _spec = &sqlgraph.CreateSpec{ + Table: machine.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + } + ) + if value, ok := mc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldCreatedAt, + }) + _node.CreatedAt = &value + } + if value, ok := mc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldUpdatedAt, + }) + _node.UpdatedAt = &value + } + if value, ok := mc.mutation.LastPush(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastPush, + }) + _node.LastPush = &value + } + if value, ok := mc.mutation.LastHeartbeat(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastHeartbeat, + }) + _node.LastHeartbeat = &value + } + if value, ok := mc.mutation.MachineId(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: 
field.TypeString, + Value: value, + Column: machine.FieldMachineId, + }) + _node.MachineId = value + } + if value, ok := mc.mutation.Password(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldPassword, + }) + _node.Password = value + } + if value, ok := mc.mutation.IpAddress(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldIpAddress, + }) + _node.IpAddress = value + } + if value, ok := mc.mutation.Scenarios(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldScenarios, + }) + _node.Scenarios = value + } + if value, ok := mc.mutation.Version(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldVersion, + }) + _node.Version = value + } + if value, ok := mc.mutation.IsValidated(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: machine.FieldIsValidated, + }) + _node.IsValidated = value + } + if value, ok := mc.mutation.Status(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldStatus, + }) + _node.Status = value + } + if value, ok := mc.mutation.AuthType(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldAuthType, + }) + _node.AuthType = value + } + if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// MachineCreateBulk is the builder for creating many Machine entities in bulk. +type MachineCreateBulk struct { + config + builders []*MachineCreate +} + +// Save creates the Machine entities in the database. +func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Machine, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MachineCreateBulk) SaveX(ctx context.Context) []*Machine { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mcb *MachineCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MachineCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/machine_delete.go b/pkg/database/ent/machine_delete.go new file mode 100644 index 0000000..bead8ac --- /dev/null +++ b/pkg/database/ent/machine_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MachineDelete is the builder for deleting a Machine entity. +type MachineDelete struct { + config + hooks []Hook + mutation *MachineMutation +} + +// Where appends a list predicates to the MachineDelete builder. +func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MachineDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(md.hooks) == 0 { + affected, err = md.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + md.mutation = mutation + affected, err = md.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(md.hooks) - 1; i >= 0; i-- { + if md.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = md.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, md.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. 
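A sketch of MachineCreateBulk in use; the client.Machine.CreateBulk entry point lives in the generated client file rather than in this hunk, so it is an assumption here, as are client and the literal values.

package example

import (
	"context"
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// seedMachines feeds several MachineCreate builders to the bulk builder,
// which runs each builder's hooks and batches the inserts through
// sqlgraph.BatchCreate as the Save implementation above shows.
func seedMachines(ctx context.Context, client *ent.Client, ids []string) ([]*ent.Machine, error) {
	builders := make([]*ent.MachineCreate, 0, len(ids))
	for i, id := range ids {
		builders = append(builders, client.Machine.Create().
			SetMachineId(id).
			SetPassword("<password-hash>").              // placeholder
			SetIpAddress(fmt.Sprintf("10.0.0.%d", i+1))) // placeholder
	}
	return client.Machine.CreateBulk(builders...).Save(ctx)
}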
+func (md *MachineDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// MachineDeleteOne is the builder for deleting a single Machine entity. +type MachineDeleteOne struct { + md *MachineDelete +} + +// Exec executes the deletion query. +func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{machine.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MachineDeleteOne) ExecX(ctx context.Context) { + mdo.md.ExecX(ctx) +} diff --git a/pkg/database/ent/machine_query.go b/pkg/database/ent/machine_query.go new file mode 100644 index 0000000..2839142 --- /dev/null +++ b/pkg/database/ent/machine_query.go @@ -0,0 +1,609 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MachineQuery is the builder for querying Machine entities. +type MachineQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Machine + withAlerts *AlertQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MachineQuery builder. +func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit adds a limit step to the query. +func (mq *MachineQuery) Limit(limit int) *MachineQuery { + mq.limit = &limit + return mq +} + +// Offset adds an offset step to the query. +func (mq *MachineQuery) Offset(offset int) *MachineQuery { + mq.offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MachineQuery) Unique(unique bool) *MachineQuery { + mq.unique = &unique + return mq +} + +// Order adds an order step to the query. +func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery { + mq.order = append(mq.order, o...) + return mq +} + +// QueryAlerts chains the current query on the "alerts" edge. 
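The delete builder above, sketched under the same assumptions (client.Machine.Delete() is the conventional generated entry point, not shown in this hunk):

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// purgeMachine deletes every row matching the machineId predicate and
// returns the affected count reported by Exec; constraint failures come
// back wrapped as *ConstraintError, per sqlExec above.
func purgeMachine(ctx context.Context, client *ent.Client, id string) (int, error) {
	return client.Machine.Delete().
		Where(machine.MachineIdEQ(id)).
		Exec(ctx)
}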
+func (mq *MachineQuery) QueryAlerts() *AlertQuery { + query := &AlertQuery{config: mq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := mq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(machine.Table, machine.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, machine.AlertsTable, machine.AlertsColumn), + ) + fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Machine entity from the query. +// Returns a *NotFoundError when no Machine was found. +func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{machine.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Machine ID from the query. +// Returns a *NotFoundError when no Machine ID was found. +func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{machine.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MachineQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Machine entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Machine entity is found. +// Returns a *NotFoundError when no Machine entities are found. +func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{machine.Label} + default: + return nil, &NotSingularError{machine.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Machine ID in the query. +// Returns a *NotSingularError when more than one Machine ID is found. +// Returns a *NotFoundError when no entities are found. +func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{machine.Label} + default: + err = &NotSingularError{machine.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MachineQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Machines. 
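A sketch contrasting the getters above: First silently ignores extra rows, while Only also reports *NotSingularError when more than one row matches. ent.IsNotFound is the helper the X variants already rely on; client and the literals remain assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// lookupMachine treats machineId as unique: Only fails with *NotFoundError
// on zero matches and *NotSingularError on duplicates, per the docs above.
func lookupMachine(ctx context.Context, client *ent.Client, id string) (*ent.Machine, error) {
	m, err := client.Machine.Query().
		Where(machine.MachineIdEQ(id)).
		Only(ctx)
	if ent.IsNotFound(err) {
		return nil, fmt.Errorf("machine %q is not registered: %w", id, err)
	}
	return m, err
}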
+func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MachineQuery) AllX(ctx context.Context) []*Machine { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Machine IDs. +func (mq *MachineQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MachineQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (mq *MachineQuery) Count(ctx context.Context) (int, error) { + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return mq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MachineQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) { + if err := mq.prepareQuery(ctx); err != nil { + return false, err + } + return mq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MachineQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MachineQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MachineQuery) Clone() *MachineQuery { + if mq == nil { + return nil + } + return &MachineQuery{ + config: mq.config, + limit: mq.limit, + offset: mq.offset, + order: append([]OrderFunc{}, mq.order...), + predicates: append([]predicate.Machine{}, mq.predicates...), + withAlerts: mq.withAlerts.Clone(), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + unique: mq.unique, + } +} + +// WithAlerts tells the query-builder to eager-load the nodes that are connected to +// the "alerts" edge. The optional arguments are used to configure the query builder of the edge. +func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery { + query := &AlertQuery{config: mq.config} + for _, opt := range opts { + opt(query) + } + mq.withAlerts = query + return mq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Machine.Query(). +// GroupBy(machine.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy { + grbuild := &MachineGroupBy{config: mq.config} + grbuild.fields = append([]string{field}, fields...) 
+ grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlQuery(ctx), nil + } + grbuild.label = machine.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Machine.Query(). +// Select(machine.FieldCreatedAt). +// Scan(ctx, &v) +func (mq *MachineQuery) Select(fields ...string) *MachineSelect { + mq.fields = append(mq.fields, fields...) + selbuild := &MachineSelect{MachineQuery: mq} + selbuild.label = machine.Label + selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan + return selbuild +} + +func (mq *MachineQuery) prepareQuery(ctx context.Context) error { + for _, f := range mq.fields { + if !machine.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MachineQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Machine, error) { + var ( + nodes = []*Machine{} + _spec = mq.querySpec() + loadedTypes = [1]bool{ + mq.withAlerts != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Machine).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Machine{config: mq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := mq.withAlerts; query != nil { + if err := mq.loadAlerts(ctx, query, nodes, + func(n *Machine) { n.Edges.Alerts = []*Alert{} }, + func(n *Machine, e *Alert) { n.Edges.Alerts = append(n.Edges.Alerts, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes []*Machine, init func(*Machine), assign func(*Machine, *Alert)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Machine) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Alert(func(s *sql.Selector) { + s.Where(sql.InValues(machine.AlertsColumn, fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.machine_alerts + if fk == nil { + return fmt.Errorf(`foreign-key "machine_alerts" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.fields + if len(mq.fields) > 0 { + _spec.Unique = mq.unique != nil && *mq.unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MachineQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := mq.FirstID(ctx); { + case 
IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + From: mq.sql, + Unique: true, + } + if unique := mq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := mq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) + for i := range fields { + if fields[i] != machine.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(machine.Table) + columns := mq.fields + if len(columns) == 0 { + columns = machine.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.unique != nil && *mq.unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MachineGroupBy is the group-by builder for Machine entities. +type MachineGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the group-by query and scans the result into the given value. 
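A sketch of the eager loading implemented by WithAlerts and loadAlerts above: the matched machines are fetched first, then their alerts arrive in one extra query keyed on the machine_alerts foreign key and are assigned to Edges.Alerts. Same client assumption as before.

package example

import (
	"context"

	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)

// machinesWithAlerts returns machines owning at least one alert; WithAlerts
// makes loadAlerts run the second query and fill node.Edges.Alerts.
func machinesWithAlerts(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Where(machine.HasAlerts()).
		WithAlerts(). // optional func(*ent.AlertQuery) arguments could refine the edge query
		All(ctx)
}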
+func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error { + query, err := mgb.path(ctx) + if err != nil { + return err + } + mgb.sql = query + return mgb.sqlScan(ctx, v) +} + +func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range mgb.fields { + if !machine.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := mgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (mgb *MachineGroupBy) sqlQuery() *sql.Selector { + selector := mgb.sql.Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) + for _, f := range mgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(mgb.fields...)...) +} + +// MachineSelect is the builder for selecting fields of Machine entities. +type MachineSelect struct { + *MachineQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (ms *MachineSelect) Scan(ctx context.Context, v any) error { + if err := ms.prepareQuery(ctx); err != nil { + return err + } + ms.sql = ms.MachineQuery.sqlQuery(ctx) + return ms.sqlScan(ctx, v) +} + +func (ms *MachineSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := ms.sql.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go new file mode 100644 index 0000000..de9f8d1 --- /dev/null +++ b/pkg/database/ent/machine_update.go @@ -0,0 +1,1061 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MachineUpdate is the builder for updating Machine entities. +type MachineUpdate struct { + config + hooks []Hook + mutation *MachineMutation +} + +// Where appends a list predicates to the MachineUpdate builder. +func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetCreatedAt sets the "created_at" field. +func (mu *MachineUpdate) SetCreatedAt(t time.Time) *MachineUpdate { + mu.mutation.SetCreatedAt(t) + return mu +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (mu *MachineUpdate) ClearCreatedAt() *MachineUpdate { + mu.mutation.ClearCreatedAt() + return mu +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (mu *MachineUpdate) SetUpdatedAt(t time.Time) *MachineUpdate { + mu.mutation.SetUpdatedAt(t) + return mu +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (mu *MachineUpdate) ClearUpdatedAt() *MachineUpdate { + mu.mutation.ClearUpdatedAt() + return mu +} + +// SetLastPush sets the "last_push" field. +func (mu *MachineUpdate) SetLastPush(t time.Time) *MachineUpdate { + mu.mutation.SetLastPush(t) + return mu +} + +// ClearLastPush clears the value of the "last_push" field. +func (mu *MachineUpdate) ClearLastPush() *MachineUpdate { + mu.mutation.ClearLastPush() + return mu +} + +// SetLastHeartbeat sets the "last_heartbeat" field. +func (mu *MachineUpdate) SetLastHeartbeat(t time.Time) *MachineUpdate { + mu.mutation.SetLastHeartbeat(t) + return mu +} + +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { + mu.mutation.ClearLastHeartbeat() + return mu +} + +// SetMachineId sets the "machineId" field. +func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { + mu.mutation.SetMachineId(s) + return mu +} + +// SetPassword sets the "password" field. +func (mu *MachineUpdate) SetPassword(s string) *MachineUpdate { + mu.mutation.SetPassword(s) + return mu +} + +// SetIpAddress sets the "ipAddress" field. +func (mu *MachineUpdate) SetIpAddress(s string) *MachineUpdate { + mu.mutation.SetIpAddress(s) + return mu +} + +// SetScenarios sets the "scenarios" field. +func (mu *MachineUpdate) SetScenarios(s string) *MachineUpdate { + mu.mutation.SetScenarios(s) + return mu +} + +// SetNillableScenarios sets the "scenarios" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableScenarios(s *string) *MachineUpdate { + if s != nil { + mu.SetScenarios(*s) + } + return mu +} + +// ClearScenarios clears the value of the "scenarios" field. +func (mu *MachineUpdate) ClearScenarios() *MachineUpdate { + mu.mutation.ClearScenarios() + return mu +} + +// SetVersion sets the "version" field. +func (mu *MachineUpdate) SetVersion(s string) *MachineUpdate { + mu.mutation.SetVersion(s) + return mu +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableVersion(s *string) *MachineUpdate { + if s != nil { + mu.SetVersion(*s) + } + return mu +} + +// ClearVersion clears the value of the "version" field. +func (mu *MachineUpdate) ClearVersion() *MachineUpdate { + mu.mutation.ClearVersion() + return mu +} + +// SetIsValidated sets the "isValidated" field. +func (mu *MachineUpdate) SetIsValidated(b bool) *MachineUpdate { + mu.mutation.SetIsValidated(b) + return mu +} + +// SetNillableIsValidated sets the "isValidated" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableIsValidated(b *bool) *MachineUpdate { + if b != nil { + mu.SetIsValidated(*b) + } + return mu +} + +// SetStatus sets the "status" field. +func (mu *MachineUpdate) SetStatus(s string) *MachineUpdate { + mu.mutation.SetStatus(s) + return mu +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableStatus(s *string) *MachineUpdate { + if s != nil { + mu.SetStatus(*s) + } + return mu +} + +// ClearStatus clears the value of the "status" field. +func (mu *MachineUpdate) ClearStatus() *MachineUpdate { + mu.mutation.ClearStatus() + return mu +} + +// SetAuthType sets the "auth_type" field. 
+func (mu *MachineUpdate) SetAuthType(s string) *MachineUpdate { + mu.mutation.SetAuthType(s) + return mu +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableAuthType(s *string) *MachineUpdate { + if s != nil { + mu.SetAuthType(*s) + } + return mu +} + +// AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. +func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { + mu.mutation.AddAlertIDs(ids...) + return mu +} + +// AddAlerts adds the "alerts" edges to the Alert entity. +func (mu *MachineUpdate) AddAlerts(a ...*Alert) *MachineUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mu.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (mu *MachineUpdate) Mutation() *MachineMutation { + return mu.mutation +} + +// ClearAlerts clears all "alerts" edges to the Alert entity. +func (mu *MachineUpdate) ClearAlerts() *MachineUpdate { + mu.mutation.ClearAlerts() + return mu +} + +// RemoveAlertIDs removes the "alerts" edge to Alert entities by IDs. +func (mu *MachineUpdate) RemoveAlertIDs(ids ...int) *MachineUpdate { + mu.mutation.RemoveAlertIDs(ids...) + return mu +} + +// RemoveAlerts removes "alerts" edges to Alert entities. +func (mu *MachineUpdate) RemoveAlerts(a ...*Alert) *MachineUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return mu.RemoveAlertIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MachineUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + mu.defaults() + if len(mu.hooks) == 0 { + if err = mu.check(); err != nil { + return 0, err + } + affected, err = mu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mu.check(); err != nil { + return 0, err + } + mu.mutation = mutation + affected, err = mu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(mu.hooks) - 1; i >= 0; i-- { + if mu.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = mu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MachineUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MachineUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MachineUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
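// Illustrative usage, not part of the imported source: a minimal sketch of
// the MachineUpdate builder above. Where, SetIsValidated, SetLastHeartbeat
// and Save come from this file; the client entry point and the
// machine.MachineIdEQ predicate are assumed from the generated API.
func validateMachine(ctx context.Context, client *Client, machineID string) (int, error) {
	// Save returns how many machines matched the predicate and were updated.
	return client.Machine.Update().
		Where(machine.MachineIdEQ(machineID)).
		SetIsValidated(true).
		SetLastHeartbeat(time.Now()).
		Save(ctx)
}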
+func (mu *MachineUpdate) defaults() { + if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { + v := machine.UpdateDefaultCreatedAt() + mu.mutation.SetCreatedAt(v) + } + if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + v := machine.UpdateDefaultUpdatedAt() + mu.mutation.SetUpdatedAt(v) + } + if _, ok := mu.mutation.LastPush(); !ok && !mu.mutation.LastPushCleared() { + v := machine.UpdateDefaultLastPush() + mu.mutation.SetLastPush(v) + } + if _, ok := mu.mutation.LastHeartbeat(); !ok && !mu.mutation.LastHeartbeatCleared() { + v := machine.UpdateDefaultLastHeartbeat() + mu.mutation.SetLastHeartbeat(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mu *MachineUpdate) check() error { + if v, ok := mu.mutation.Scenarios(); ok { + if err := machine.ScenariosValidator(v); err != nil { + return &ValidationError{Name: "scenarios", err: fmt.Errorf(`ent: validator failed for field "Machine.scenarios": %w`, err)} + } + } + return nil +} + +func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldCreatedAt, + }) + } + if mu.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldCreatedAt, + }) + } + if value, ok := mu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldUpdatedAt, + }) + } + if mu.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldUpdatedAt, + }) + } + if value, ok := mu.mutation.LastPush(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastPush, + }) + } + if mu.mutation.LastPushCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldLastPush, + }) + } + if value, ok := mu.mutation.LastHeartbeat(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastHeartbeat, + }) + } + if mu.mutation.LastHeartbeatCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldLastHeartbeat, + }) + } + if value, ok := mu.mutation.MachineId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldMachineId, + }) + } + if value, ok := mu.mutation.Password(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldPassword, + }) + } + if value, ok := mu.mutation.IpAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: 
machine.FieldIpAddress, + }) + } + if value, ok := mu.mutation.Scenarios(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldScenarios, + }) + } + if mu.mutation.ScenariosCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldScenarios, + }) + } + if value, ok := mu.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldVersion, + }) + } + if mu.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldVersion, + }) + } + if value, ok := mu.mutation.IsValidated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: machine.FieldIsValidated, + }) + } + if value, ok := mu.mutation.Status(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldStatus, + }) + } + if mu.mutation.StatusCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldStatus, + }) + } + if value, ok := mu.mutation.AuthType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldAuthType, + }) + } + if mu.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.RemovedAlertsIDs(); len(nodes) > 0 && !mu.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.AlertsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// MachineUpdateOne is the builder for updating a single Machine entity. +type MachineUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MachineMutation +} + +// SetCreatedAt sets the "created_at" field. 
+func (muo *MachineUpdateOne) SetCreatedAt(t time.Time) *MachineUpdateOne { + muo.mutation.SetCreatedAt(t) + return muo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (muo *MachineUpdateOne) ClearCreatedAt() *MachineUpdateOne { + muo.mutation.ClearCreatedAt() + return muo +} + +// SetUpdatedAt sets the "updated_at" field. +func (muo *MachineUpdateOne) SetUpdatedAt(t time.Time) *MachineUpdateOne { + muo.mutation.SetUpdatedAt(t) + return muo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (muo *MachineUpdateOne) ClearUpdatedAt() *MachineUpdateOne { + muo.mutation.ClearUpdatedAt() + return muo +} + +// SetLastPush sets the "last_push" field. +func (muo *MachineUpdateOne) SetLastPush(t time.Time) *MachineUpdateOne { + muo.mutation.SetLastPush(t) + return muo +} + +// ClearLastPush clears the value of the "last_push" field. +func (muo *MachineUpdateOne) ClearLastPush() *MachineUpdateOne { + muo.mutation.ClearLastPush() + return muo +} + +// SetLastHeartbeat sets the "last_heartbeat" field. +func (muo *MachineUpdateOne) SetLastHeartbeat(t time.Time) *MachineUpdateOne { + muo.mutation.SetLastHeartbeat(t) + return muo +} + +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { + muo.mutation.ClearLastHeartbeat() + return muo +} + +// SetMachineId sets the "machineId" field. +func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { + muo.mutation.SetMachineId(s) + return muo +} + +// SetPassword sets the "password" field. +func (muo *MachineUpdateOne) SetPassword(s string) *MachineUpdateOne { + muo.mutation.SetPassword(s) + return muo +} + +// SetIpAddress sets the "ipAddress" field. +func (muo *MachineUpdateOne) SetIpAddress(s string) *MachineUpdateOne { + muo.mutation.SetIpAddress(s) + return muo +} + +// SetScenarios sets the "scenarios" field. +func (muo *MachineUpdateOne) SetScenarios(s string) *MachineUpdateOne { + muo.mutation.SetScenarios(s) + return muo +} + +// SetNillableScenarios sets the "scenarios" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableScenarios(s *string) *MachineUpdateOne { + if s != nil { + muo.SetScenarios(*s) + } + return muo +} + +// ClearScenarios clears the value of the "scenarios" field. +func (muo *MachineUpdateOne) ClearScenarios() *MachineUpdateOne { + muo.mutation.ClearScenarios() + return muo +} + +// SetVersion sets the "version" field. +func (muo *MachineUpdateOne) SetVersion(s string) *MachineUpdateOne { + muo.mutation.SetVersion(s) + return muo +} + +// SetNillableVersion sets the "version" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableVersion(s *string) *MachineUpdateOne { + if s != nil { + muo.SetVersion(*s) + } + return muo +} + +// ClearVersion clears the value of the "version" field. +func (muo *MachineUpdateOne) ClearVersion() *MachineUpdateOne { + muo.mutation.ClearVersion() + return muo +} + +// SetIsValidated sets the "isValidated" field. +func (muo *MachineUpdateOne) SetIsValidated(b bool) *MachineUpdateOne { + muo.mutation.SetIsValidated(b) + return muo +} + +// SetNillableIsValidated sets the "isValidated" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableIsValidated(b *bool) *MachineUpdateOne { + if b != nil { + muo.SetIsValidated(*b) + } + return muo +} + +// SetStatus sets the "status" field. 
+func (muo *MachineUpdateOne) SetStatus(s string) *MachineUpdateOne { + muo.mutation.SetStatus(s) + return muo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableStatus(s *string) *MachineUpdateOne { + if s != nil { + muo.SetStatus(*s) + } + return muo +} + +// ClearStatus clears the value of the "status" field. +func (muo *MachineUpdateOne) ClearStatus() *MachineUpdateOne { + muo.mutation.ClearStatus() + return muo +} + +// SetAuthType sets the "auth_type" field. +func (muo *MachineUpdateOne) SetAuthType(s string) *MachineUpdateOne { + muo.mutation.SetAuthType(s) + return muo +} + +// SetNillableAuthType sets the "auth_type" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableAuthType(s *string) *MachineUpdateOne { + if s != nil { + muo.SetAuthType(*s) + } + return muo +} + +// AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. +func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { + muo.mutation.AddAlertIDs(ids...) + return muo +} + +// AddAlerts adds the "alerts" edges to the Alert entity. +func (muo *MachineUpdateOne) AddAlerts(a ...*Alert) *MachineUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return muo.AddAlertIDs(ids...) +} + +// Mutation returns the MachineMutation object of the builder. +func (muo *MachineUpdateOne) Mutation() *MachineMutation { + return muo.mutation +} + +// ClearAlerts clears all "alerts" edges to the Alert entity. +func (muo *MachineUpdateOne) ClearAlerts() *MachineUpdateOne { + muo.mutation.ClearAlerts() + return muo +} + +// RemoveAlertIDs removes the "alerts" edge to Alert entities by IDs. +func (muo *MachineUpdateOne) RemoveAlertIDs(ids ...int) *MachineUpdateOne { + muo.mutation.RemoveAlertIDs(ids...) + return muo +} + +// RemoveAlerts removes "alerts" edges to Alert entities. +func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return muo.RemoveAlertIDs(ids...) +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Machine entity. 
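// Illustrative usage, not part of the imported source: MachineUpdateOne with
// a partial Select, as defined above. UpdateOneID is assumed to be the
// generated entry point on the machine client; Select restricts which
// columns are scanned back into the returned entity.
func bumpMachineVersion(ctx context.Context, client *Client, id int, version string) (*Machine, error) {
	return client.Machine.UpdateOneID(id).
		SetVersion(version).
		Select(machine.FieldVersion).
		Save(ctx)
}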
+func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) { + var ( + err error + node *Machine + ) + muo.defaults() + if len(muo.hooks) == 0 { + if err = muo.check(); err != nil { + return nil, err + } + node, err = muo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = muo.check(); err != nil { + return nil, err + } + muo.mutation = mutation + node, err = muo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(muo.hooks) - 1; i >= 0; i-- { + if muo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = muo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, muo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Machine) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MachineUpdateOne) SaveX(ctx context.Context) *Machine { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MachineUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MachineUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (muo *MachineUpdateOne) defaults() { + if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { + v := machine.UpdateDefaultCreatedAt() + muo.mutation.SetCreatedAt(v) + } + if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + v := machine.UpdateDefaultUpdatedAt() + muo.mutation.SetUpdatedAt(v) + } + if _, ok := muo.mutation.LastPush(); !ok && !muo.mutation.LastPushCleared() { + v := machine.UpdateDefaultLastPush() + muo.mutation.SetLastPush(v) + } + if _, ok := muo.mutation.LastHeartbeat(); !ok && !muo.mutation.LastHeartbeatCleared() { + v := machine.UpdateDefaultLastHeartbeat() + muo.mutation.SetLastHeartbeat(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (muo *MachineUpdateOne) check() error { + if v, ok := muo.mutation.Scenarios(); ok { + if err := machine.ScenariosValidator(v); err != nil { + return &ValidationError{Name: "scenarios", err: fmt.Errorf(`ent: validator failed for field "Machine.scenarios": %w`, err)} + } + } + return nil +} + +func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: machine.Table, + Columns: machine.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: machine.FieldID, + }, + }, + } + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Machine.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) + for _, f := range fields { + if !machine.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != machine.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldCreatedAt, + }) + } + if muo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldCreatedAt, + }) + } + if value, ok := muo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldUpdatedAt, + }) + } + if muo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldUpdatedAt, + }) + } + if value, ok := muo.mutation.LastPush(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastPush, + }) + } + if muo.mutation.LastPushCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldLastPush, + }) + } + if value, ok := muo.mutation.LastHeartbeat(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: machine.FieldLastHeartbeat, + }) + } + if muo.mutation.LastHeartbeatCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: machine.FieldLastHeartbeat, + }) + } + if value, ok := muo.mutation.MachineId(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldMachineId, + }) + } + if value, ok := muo.mutation.Password(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldPassword, + }) + } + if value, ok := muo.mutation.IpAddress(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldIpAddress, + }) + } + if value, ok := muo.mutation.Scenarios(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldScenarios, + }) + } + if muo.mutation.ScenariosCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldScenarios, + }) + } + if value, ok := muo.mutation.Version(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldVersion, + }) + } + if muo.mutation.VersionCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldVersion, + }) + } + if value, ok := muo.mutation.IsValidated(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeBool, + Value: value, + Column: machine.FieldIsValidated, + }) + } + if value, ok := muo.mutation.Status(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldStatus, + }) + } + if muo.mutation.StatusCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Column: machine.FieldStatus, + }) + } + if value, ok := muo.mutation.AuthType(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: machine.FieldAuthType, + }) + } + if muo.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.RemovedAlertsIDs(); len(nodes) > 0 && !muo.mutation.AlertsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.AlertsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: machine.AlertsTable, + Columns: []string{machine.AlertsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Machine{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go new file mode 100644 index 0000000..ee2082d --- /dev/null +++ b/pkg/database/ent/meta.go @@ -0,0 +1,182 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" +) + +// Meta is the model entity for the Meta schema. +type Meta struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt *time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt *time.Time `json:"updated_at,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the MetaQuery when eager-loading is set. + Edges MetaEdges `json:"edges"` + alert_metas *int +} + +// MetaEdges holds the relations/edges for other nodes in the graph. +type MetaEdges struct { + // Owner holds the value of the owner edge. + Owner *Alert `json:"owner,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e MetaEdges) OwnerOrErr() (*Alert, error) { + if e.loadedTypes[0] { + if e.Owner == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: alert.Label} + } + return e.Owner, nil + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Meta) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case meta.FieldID: + values[i] = new(sql.NullInt64) + case meta.FieldKey, meta.FieldValue: + values[i] = new(sql.NullString) + case meta.FieldCreatedAt, meta.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case meta.ForeignKeys[0]: // alert_metas + values[i] = new(sql.NullInt64) + default: + return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i]) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Meta fields. 
+func (m *Meta) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case meta.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case meta.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + m.CreatedAt = new(time.Time) + *m.CreatedAt = value.Time + } + case meta.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + m.UpdatedAt = new(time.Time) + *m.UpdatedAt = value.Time + } + case meta.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + m.Key = value.String + } + case meta.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + m.Value = value.String + } + case meta.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field alert_metas", value) + } else if value.Valid { + m.alert_metas = new(int) + *m.alert_metas = int(value.Int64) + } + } + } + return nil +} + +// QueryOwner queries the "owner" edge of the Meta entity. +func (m *Meta) QueryOwner() *AlertQuery { + return (&MetaClient{config: m.config}).QueryOwner(m) +} + +// Update returns a builder for updating this Meta. +// Note that you need to call Meta.Unwrap() before calling this method if this Meta +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Meta) Update() *MetaUpdateOne { + return (&MetaClient{config: m.config}).UpdateOne(m) +} + +// Unwrap unwraps the Meta entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Meta) Unwrap() *Meta { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Meta is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Meta) String() string { + var builder strings.Builder + builder.WriteString("Meta(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + if v := m.CreatedAt; v != nil { + builder.WriteString("created_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := m.UpdatedAt; v != nil { + builder.WriteString("updated_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("key=") + builder.WriteString(m.Key) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(m.Value) + builder.WriteByte(')') + return builder.String() +} + +// MetaSlice is a parsable slice of Meta. +type MetaSlice []*Meta + +func (m MetaSlice) config(cfg config) { + for _i := range m { + m[_i].config = cfg + } +} diff --git a/pkg/database/ent/meta/meta.go b/pkg/database/ent/meta/meta.go new file mode 100644 index 0000000..5fea86c --- /dev/null +++ b/pkg/database/ent/meta/meta.go @@ -0,0 +1,76 @@ +// Code generated by ent, DO NOT EDIT. 
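// Illustrative usage, not part of the imported source: reading back the
// eager-loaded "owner" edge of the Meta model above. OwnerOrErr reports a
// NotLoadedError when the edge was never requested and a NotFoundError when
// it was loaded but absent; WithOwner, Only and the client entry point are
// assumed from the generated API.
func metaOwnerID(ctx context.Context, client *Client, id int) (int, error) {
	m, err := client.Meta.Query().Where(meta.IDEQ(id)).WithOwner().Only(ctx)
	if err != nil {
		return 0, err
	}
	owner, err := m.Edges.OwnerOrErr()
	if err != nil {
		return 0, err
	}
	return owner.ID, nil
}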
+ +package meta + +import ( + "time" +) + +const ( + // Label holds the string label denoting the meta type in the database. + Label = "meta" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + // Table holds the table name of the meta in the database. + Table = "meta" + // OwnerTable is the table that holds the owner relation/edge. + OwnerTable = "meta" + // OwnerInverseTable is the table name for the Alert entity. + // It exists in this package in order to avoid circular dependency with the "alert" package. + OwnerInverseTable = "alerts" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "alert_metas" +) + +// Columns holds all SQL columns for meta fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldKey, + FieldValue, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "meta" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "alert_metas", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. + UpdateDefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // ValueValidator is a validator for the "value" field. It is called by the builders before save. + ValueValidator func(string) error +) diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go new file mode 100644 index 0000000..9938e9a --- /dev/null +++ b/pkg/database/ent/meta/where.go @@ -0,0 +1,524 @@ +// Code generated by ent, DO NOT EDIT. + +package meta + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldID), id)) + }) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldID), id)) + }) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.In(s.C(FieldID), v...)) + }) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + v := make([]any, len(ids)) + for i := range v { + v[i] = ids[i] + } + s.Where(sql.NotIn(s.C(FieldID), v...)) + }) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldID), id)) + }) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldID), id)) + }) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldID), id)) + }) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldID), id)) + }) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldKey), v)) + }) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) + }) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldCreatedAt), v)) + }) +} + +// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. +func CreatedAtIsNil() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldCreatedAt))) + }) +} + +// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. +func CreatedAtNotNil() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldCreatedAt))) + }) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) + }) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) + }) +} + +// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. +func UpdatedAtIsNil() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.IsNull(s.C(FieldUpdatedAt))) + }) +} + +// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. +func UpdatedAtNotNil() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotNull(s.C(FieldUpdatedAt))) + }) +} + +// KeyEQ applies the EQ predicate on the "key" field. 
+func KeyEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldKey), v)) + }) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldKey), v)) + }) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldKey), v...)) + }) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldKey), v...)) + }) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldKey), v)) + }) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldKey), v)) + }) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldKey), v)) + }) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldKey), v)) + }) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldKey), v)) + }) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldKey), v)) + }) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldKey), v)) + }) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldKey), v)) + }) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldKey), v)) + }) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EQ(s.C(FieldValue), v)) + }) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NEQ(s.C(FieldValue), v)) + }) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.In(s.C(FieldValue), v...)) + }) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. 
+func ValueNotIn(vs ...string) predicate.Meta { + v := make([]any, len(vs)) + for i := range v { + v[i] = vs[i] + } + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.NotIn(s.C(FieldValue), v...)) + }) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GT(s.C(FieldValue), v)) + }) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.GTE(s.C(FieldValue), v)) + }) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LT(s.C(FieldValue), v)) + }) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.LTE(s.C(FieldValue), v)) + }) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.Contains(s.C(FieldValue), v)) + }) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasPrefix(s.C(FieldValue), v)) + }) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.HasSuffix(s.C(FieldValue), v)) + }) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.EqualFold(s.C(FieldValue), v)) + }) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s.Where(sql.ContainsFold(s.C(FieldValue), v)) + }) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. +func HasOwner() predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. 
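// Illustrative usage, not part of the imported source: composing the
// predicates defined above. And/Or fold their children into a single WHERE
// clause; Count and the client entry point are assumed generated methods,
// while KeyEQ, ValueContainsFold and ValueHasPrefix come from this file.
func countServiceMeta(ctx context.Context, client *Client) (int, error) {
	return client.Meta.Query().
		Where(meta.And(
			meta.KeyEQ("service"),
			meta.Or(
				meta.ValueContainsFold("http"),
				meta.ValueHasPrefix("nginx"),
			),
		)).
		Count(ctx)
}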
+func Or(predicates ...predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Meta) predicate.Meta { + return predicate.Meta(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go new file mode 100644 index 0000000..cb7c6fc --- /dev/null +++ b/pkg/database/ent/meta_create.go @@ -0,0 +1,347 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" +) + +// MetaCreate is the builder for creating a Meta entity. +type MetaCreate struct { + config + mutation *MetaMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (mc *MetaCreate) SetCreatedAt(t time.Time) *MetaCreate { + mc.mutation.SetCreatedAt(t) + return mc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mc *MetaCreate) SetNillableCreatedAt(t *time.Time) *MetaCreate { + if t != nil { + mc.SetCreatedAt(*t) + } + return mc +} + +// SetUpdatedAt sets the "updated_at" field. +func (mc *MetaCreate) SetUpdatedAt(t time.Time) *MetaCreate { + mc.mutation.SetUpdatedAt(t) + return mc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (mc *MetaCreate) SetNillableUpdatedAt(t *time.Time) *MetaCreate { + if t != nil { + mc.SetUpdatedAt(*t) + } + return mc +} + +// SetKey sets the "key" field. +func (mc *MetaCreate) SetKey(s string) *MetaCreate { + mc.mutation.SetKey(s) + return mc +} + +// SetValue sets the "value" field. +func (mc *MetaCreate) SetValue(s string) *MetaCreate { + mc.mutation.SetValue(s) + return mc +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (mc *MetaCreate) SetOwnerID(id int) *MetaCreate { + mc.mutation.SetOwnerID(id) + return mc +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (mc *MetaCreate) SetNillableOwnerID(id *int) *MetaCreate { + if id != nil { + mc = mc.SetOwnerID(*id) + } + return mc +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (mc *MetaCreate) SetOwner(a *Alert) *MetaCreate { + return mc.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (mc *MetaCreate) Mutation() *MetaMutation { + return mc.mutation +} + +// Save creates the Meta in the database. 
+func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) { + var ( + err error + node *Meta + ) + mc.defaults() + if len(mc.hooks) == 0 { + if err = mc.check(); err != nil { + return nil, err + } + node, err = mc.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mc.check(); err != nil { + return nil, err + } + mc.mutation = mutation + if node, err = mc.sqlSave(ctx); err != nil { + return nil, err + } + mutation.id = &node.ID + mutation.done = true + return node, err + }) + for i := len(mc.hooks) - 1; i >= 0; i-- { + if mc.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = mc.hooks[i](mut) + } + v, err := mut.Mutate(ctx, mc.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Meta) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) + } + node = nv + } + return node, err +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MetaCreate) SaveX(ctx context.Context) *Meta { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MetaCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MetaCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (mc *MetaCreate) defaults() { + if _, ok := mc.mutation.CreatedAt(); !ok { + v := meta.DefaultCreatedAt() + mc.mutation.SetCreatedAt(v) + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + v := meta.DefaultUpdatedAt() + mc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (mc *MetaCreate) check() error { + if _, ok := mc.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Meta.key"`)} + } + if _, ok := mc.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "Meta.value"`)} + } + if v, ok := mc.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} + } + } + return nil +} + +func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + return _node, nil +} + +func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { + var ( + _node = &Meta{config: mc.config} + _spec = &sqlgraph.CreateSpec{ + Table: meta.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + } + ) + if value, ok := mc.mutation.CreatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + _node.CreatedAt = &value + } + if value, ok := mc.mutation.UpdatedAt(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + _node.UpdatedAt = &value + } + if value, ok := mc.mutation.Key(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + _node.Key = value + } + if value, ok := mc.mutation.Value(); ok { + _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + _node.Value = value + } + if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.alert_metas = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// MetaCreateBulk is the builder for creating many Meta entities in bulk. +type MetaCreateBulk struct { + config + builders []*MetaCreate +} + +// Save creates the Meta entities in the database. 
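// Illustrative usage, not part of the imported source: the MetaCreate
// builder above. check() enforces the required "key" and "value" fields and
// runs meta.ValueValidator before save; the owner edge is attached by alert
// ID. The client entry point is assumed from the generated API, and the
// key/value literals are placeholders.
func attachMeta(ctx context.Context, client *Client, alertID int) (*Meta, error) {
	return client.Meta.Create().
		SetKey("source_ip").
		SetValue("192.0.2.1").
		SetOwnerID(alertID).
		Save(ctx)
}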
+func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Meta, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + nodes[i], specs[i] = builder.createSpec() + var err error + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MetaCreateBulk) SaveX(ctx context.Context) []*Meta { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mcb *MetaCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MetaCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/meta_delete.go b/pkg/database/ent/meta_delete.go new file mode 100644 index 0000000..e1e49d2 --- /dev/null +++ b/pkg/database/ent/meta_delete.go @@ -0,0 +1,115 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetaDelete is the builder for deleting a Meta entity. +type MetaDelete struct { + config + hooks []Hook + mutation *MetaMutation +} + +// Where appends a list predicates to the MetaDelete builder. +func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
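+//
+// For example (a sketch; meta.KeyEQ is the generated string predicate):
+//
+//	n, err := client.Meta.Delete().
+//		Where(meta.KeyEQ("stale")).
+//		Exec(ctx)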
+func (md *MetaDelete) Exec(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + if len(md.hooks) == 0 { + affected, err = md.sqlExec(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + md.mutation = mutation + affected, err = md.sqlExec(ctx) + mutation.done = true + return affected, err + }) + for i := len(md.hooks) - 1; i >= 0; i-- { + if md.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = md.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, md.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// ExecX is like Exec, but panics if an error occurs. +func (md *MetaDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { + _spec := &sqlgraph.DeleteSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return affected, err +} + +// MetaDeleteOne is the builder for deleting a single Meta entity. +type MetaDeleteOne struct { + md *MetaDelete +} + +// Exec executes the deletion query. +func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{meta.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MetaDeleteOne) ExecX(ctx context.Context) { + mdo.md.ExecX(ctx) +} diff --git a/pkg/database/ent/meta_query.go b/pkg/database/ent/meta_query.go new file mode 100644 index 0000000..96c7721 --- /dev/null +++ b/pkg/database/ent/meta_query.go @@ -0,0 +1,613 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetaQuery is the builder for querying Meta entities. +type MetaQuery struct { + config + limit *int + offset *int + unique *bool + order []OrderFunc + fields []string + predicates []predicate.Meta + withOwner *AlertQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MetaQuery builder. +func (mq *MetaQuery) Where(ps ...predicate.Meta) *MetaQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit adds a limit step to the query. +func (mq *MetaQuery) Limit(limit int) *MetaQuery { + mq.limit = &limit + return mq +} + +// Offset adds an offset step to the query. +func (mq *MetaQuery) Offset(offset int) *MetaQuery { + mq.offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (mq *MetaQuery) Unique(unique bool) *MetaQuery { + mq.unique = &unique + return mq +} + +// Order adds an order step to the query. +func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery { + mq.order = append(mq.order, o...) + return mq +} + +// QueryOwner chains the current query on the "owner" edge. +func (mq *MetaQuery) QueryOwner() *AlertQuery { + query := &AlertQuery{config: mq.config} + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := mq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(meta.Table, meta.FieldID, selector), + sqlgraph.To(alert.Table, alert.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, meta.OwnerTable, meta.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Meta entity from the query. +// Returns a *NotFoundError when no Meta was found. +func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) { + nodes, err := mq.Limit(1).All(ctx) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{meta.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MetaQuery) FirstX(ctx context.Context) *Meta { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Meta ID from the query. +// Returns a *NotFoundError when no Meta ID was found. +func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(ctx); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{meta.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MetaQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Meta entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Meta entity is found. +// Returns a *NotFoundError when no Meta entities are found. +func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) { + nodes, err := mq.Limit(2).All(ctx) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{meta.Label} + default: + return nil, &NotSingularError{meta.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Meta ID in the query. +// Returns a *NotSingularError when more than one Meta ID is found. +// Returns a *NotFoundError when no entities are found. +func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(ctx); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{meta.Label} + default: + err = &NotSingularError{meta.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
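+//
+// The terminator methods above differ only in their error contracts; a short
+// caller-side sketch:
+//
+//	m, err := client.Meta.Query().Where(meta.KeyEQ("k")).Only(ctx)
+//	switch {
+//	case ent.IsNotFound(err): // zero matches
+//	case ent.IsNotSingular(err): // more than one match
+//	}
+//	_ = m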
+func (mq *MetaQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of MetaSlice. +func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlAll(ctx) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MetaQuery) AllX(ctx context.Context) []*Meta { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Meta IDs. +func (mq *MetaQuery) IDs(ctx context.Context) ([]int, error) { + var ids []int + if err := mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MetaQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (mq *MetaQuery) Count(ctx context.Context) (int, error) { + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return mq.sqlCount(ctx) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MetaQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) { + if err := mq.prepareQuery(ctx); err != nil { + return false, err + } + return mq.sqlExist(ctx) +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MetaQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MetaQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MetaQuery) Clone() *MetaQuery { + if mq == nil { + return nil + } + return &MetaQuery{ + config: mq.config, + limit: mq.limit, + offset: mq.offset, + order: append([]OrderFunc{}, mq.order...), + predicates: append([]predicate.Meta{}, mq.predicates...), + withOwner: mq.withOwner.Clone(), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + unique: mq.unique, + } +} + +// WithOwner tells the query-builder to eager-load the nodes that are connected to +// the "owner" edge. The optional arguments are used to configure the query builder of the edge. +func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { + query := &AlertQuery{config: mq.config} + for _, opt := range opts { + opt(query) + } + mq.withOwner = query + return mq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Meta.Query(). +// GroupBy(meta.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { + grbuild := &MetaGroupBy{config: mq.config} + grbuild.fields = append([]string{field}, fields...) 
+ grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + return mq.sqlQuery(ctx), nil + } + grbuild.label = meta.Label + grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Meta.Query(). +// Select(meta.FieldCreatedAt). +// Scan(ctx, &v) +func (mq *MetaQuery) Select(fields ...string) *MetaSelect { + mq.fields = append(mq.fields, fields...) + selbuild := &MetaSelect{MetaQuery: mq} + selbuild.label = meta.Label + selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan + return selbuild +} + +func (mq *MetaQuery) prepareQuery(ctx context.Context) error { + for _, f := range mq.fields { + if !meta.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MetaQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Meta, error) { + var ( + nodes = []*Meta{} + withFKs = mq.withFKs + _spec = mq.querySpec() + loadedTypes = [1]bool{ + mq.withOwner != nil, + } + ) + if mq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, meta.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Meta).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Meta{config: mq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := mq.withOwner; query != nil { + if err := mq.loadOwner(ctx, query, nodes, nil, + func(n *Meta, e *Alert) { n.Edges.Owner = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*Meta, init func(*Meta), assign func(*Meta, *Alert)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Meta) + for i := range nodes { + if nodes[i].alert_metas == nil { + continue + } + fk := *nodes[i].alert_metas + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + query.Where(alert.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.fields + if len(mq.fields) > 0 { + _spec.Unique = mq.unique != nil && *mq.unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MetaQuery) sqlExist(ctx context.Context) (bool, error) { + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return 
true, nil + } +} + +func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { + _spec := &sqlgraph.QuerySpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + From: mq.sql, + Unique: true, + } + if unique := mq.unique; unique != nil { + _spec.Unique = *unique + } + if fields := mq.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID) + for i := range fields { + if fields[i] != meta.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(meta.Table) + columns := mq.fields + if len(columns) == 0 { + columns = meta.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.unique != nil && *mq.unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MetaGroupBy is the group-by builder for Meta entities. +type MetaGroupBy struct { + config + selector + fields []string + fns []AggregateFunc + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the group-by query and scans the result into the given value. 
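+//
+// For example, counting rows per key (a sketch, assuming ctx and client):
+//
+//	var v []struct {
+//		Key   string `json:"key"`
+//		Count int    `json:"count"`
+//	}
+//	err := client.Meta.Query().
+//		GroupBy(meta.FieldKey).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)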
+func (mgb *MetaGroupBy) Scan(ctx context.Context, v any) error { + query, err := mgb.path(ctx) + if err != nil { + return err + } + mgb.sql = query + return mgb.sqlScan(ctx, v) +} + +func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v any) error { + for _, f := range mgb.fields { + if !meta.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} + } + } + selector := mgb.sqlQuery() + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +func (mgb *MetaGroupBy) sqlQuery() *sql.Selector { + selector := mgb.sql.Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + // If no columns were selected in a custom aggregation function, the default + // selection is the fields used for "group-by", and the aggregation functions. + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) + for _, f := range mgb.fields { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + return selector.GroupBy(selector.Columns(mgb.fields...)...) +} + +// MetaSelect is the builder for selecting fields of Meta entities. +type MetaSelect struct { + *MetaQuery + selector + // intermediate query (i.e. traversal path). + sql *sql.Selector +} + +// Scan applies the selector query and scans the result into the given value. +func (ms *MetaSelect) Scan(ctx context.Context, v any) error { + if err := ms.prepareQuery(ctx); err != nil { + return err + } + ms.sql = ms.MetaQuery.sqlQuery(ctx) + return ms.sqlScan(ctx, v) +} + +func (ms *MetaSelect) sqlScan(ctx context.Context, v any) error { + rows := &sql.Rows{} + query, args := ms.sql.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go new file mode 100644 index 0000000..1cbdc2c --- /dev/null +++ b/pkg/database/ent/meta_update.go @@ -0,0 +1,577 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetaUpdate is the builder for updating Meta entities. +type MetaUpdate struct { + config + hooks []Hook + mutation *MetaMutation +} + +// Where appends a list predicates to the MetaUpdate builder. +func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetCreatedAt sets the "created_at" field. +func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { + mu.mutation.SetCreatedAt(t) + return mu +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (mu *MetaUpdate) ClearCreatedAt() *MetaUpdate { + mu.mutation.ClearCreatedAt() + return mu +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { + mu.mutation.SetUpdatedAt(t) + return mu +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (mu *MetaUpdate) ClearUpdatedAt() *MetaUpdate { + mu.mutation.ClearUpdatedAt() + return mu +} + +// SetKey sets the "key" field. +func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { + mu.mutation.SetKey(s) + return mu +} + +// SetValue sets the "value" field. +func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { + mu.mutation.SetValue(s) + return mu +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (mu *MetaUpdate) SetOwnerID(id int) *MetaUpdate { + mu.mutation.SetOwnerID(id) + return mu +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (mu *MetaUpdate) SetNillableOwnerID(id *int) *MetaUpdate { + if id != nil { + mu = mu.SetOwnerID(*id) + } + return mu +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (mu *MetaUpdate) SetOwner(a *Alert) *MetaUpdate { + return mu.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (mu *MetaUpdate) Mutation() *MetaMutation { + return mu.mutation +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (mu *MetaUpdate) ClearOwner() *MetaUpdate { + mu.mutation.ClearOwner() + return mu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MetaUpdate) Save(ctx context.Context) (int, error) { + var ( + err error + affected int + ) + mu.defaults() + if len(mu.hooks) == 0 { + if err = mu.check(); err != nil { + return 0, err + } + affected, err = mu.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = mu.check(); err != nil { + return 0, err + } + mu.mutation = mutation + affected, err = mu.sqlSave(ctx) + mutation.done = true + return affected, err + }) + for i := len(mu.hooks) - 1; i >= 0; i-- { + if mu.hooks[i] == nil { + return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = mu.hooks[i](mut) + } + if _, err := mut.Mutate(ctx, mu.mutation); err != nil { + return 0, err + } + } + return affected, err +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MetaUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MetaUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MetaUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (mu *MetaUpdate) defaults() { + if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { + v := meta.UpdateDefaultCreatedAt() + mu.mutation.SetCreatedAt(v) + } + if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + v := meta.UpdateDefaultUpdatedAt() + mu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
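+//
+// Save runs this after defaults(); for instance a "value" failing
+// meta.ValueValidator (the column is capped at 4095 in migrate/schema.go)
+// is rejected here before any SQL runs. A passing sketch:
+//
+//	n, err := client.Meta.Update().
+//		Where(meta.KeyEQ("stale")).
+//		SetValue("refreshed").
+//		Save(ctx)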
+func (mu *MetaUpdate) check() error { + if v, ok := mu.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} + } + } + return nil +} + +func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + } + if mu.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: meta.FieldCreatedAt, + }) + } + if value, ok := mu.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + } + if mu.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: meta.FieldUpdatedAt, + }) + } + if value, ok := mu.mutation.Key(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + } + if value, ok := mu.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + } + if mu.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{meta.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + return n, nil +} + +// MetaUpdateOne is the builder for updating a single Meta entity. +type MetaUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MetaMutation +} + +// SetCreatedAt sets the "created_at" field. +func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { + muo.mutation.SetCreatedAt(t) + return muo +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (muo *MetaUpdateOne) ClearCreatedAt() *MetaUpdateOne { + muo.mutation.ClearCreatedAt() + return muo +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { + muo.mutation.SetUpdatedAt(t) + return muo +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (muo *MetaUpdateOne) ClearUpdatedAt() *MetaUpdateOne { + muo.mutation.ClearUpdatedAt() + return muo +} + +// SetKey sets the "key" field. +func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { + muo.mutation.SetKey(s) + return muo +} + +// SetValue sets the "value" field. +func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { + muo.mutation.SetValue(s) + return muo +} + +// SetOwnerID sets the "owner" edge to the Alert entity by ID. +func (muo *MetaUpdateOne) SetOwnerID(id int) *MetaUpdateOne { + muo.mutation.SetOwnerID(id) + return muo +} + +// SetNillableOwnerID sets the "owner" edge to the Alert entity by ID if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableOwnerID(id *int) *MetaUpdateOne { + if id != nil { + muo = muo.SetOwnerID(*id) + } + return muo +} + +// SetOwner sets the "owner" edge to the Alert entity. +func (muo *MetaUpdateOne) SetOwner(a *Alert) *MetaUpdateOne { + return muo.SetOwnerID(a.ID) +} + +// Mutation returns the MetaMutation object of the builder. +func (muo *MetaUpdateOne) Mutation() *MetaMutation { + return muo.mutation +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne { + muo.mutation.ClearOwner() + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Meta entity. +func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) { + var ( + err error + node *Meta + ) + muo.defaults() + if len(muo.hooks) == 0 { + if err = muo.check(); err != nil { + return nil, err + } + node, err = muo.sqlSave(ctx) + } else { + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err = muo.check(); err != nil { + return nil, err + } + muo.mutation = mutation + node, err = muo.sqlSave(ctx) + mutation.done = true + return node, err + }) + for i := len(muo.hooks) - 1; i >= 0; i-- { + if muo.hooks[i] == nil { + return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = muo.hooks[i](mut) + } + v, err := mut.Mutate(ctx, muo.mutation) + if err != nil { + return nil, err + } + nv, ok := v.(*Meta) + if !ok { + return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) + } + node = nv + } + return node, err +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MetaUpdateOne) SaveX(ctx context.Context) *Meta { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MetaUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MetaUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
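+//
+// This is what refreshes "updated_at" on every update unless the field was
+// explicitly set or cleared; e.g. (a sketch, id assumed to exist):
+//
+//	m2, err := client.Meta.UpdateOneID(id).SetValue("v2").Save(ctx)
+//	// m2.UpdatedAt now holds the result of meta.UpdateDefaultUpdatedAt()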
+func (muo *MetaUpdateOne) defaults() { + if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { + v := meta.UpdateDefaultCreatedAt() + muo.mutation.SetCreatedAt(v) + } + if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + v := meta.UpdateDefaultUpdatedAt() + muo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (muo *MetaUpdateOne) check() error { + if v, ok := muo.mutation.Value(); ok { + if err := meta.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} + } + } + return nil +} + +func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { + _spec := &sqlgraph.UpdateSpec{ + Node: &sqlgraph.NodeSpec{ + Table: meta.Table, + Columns: meta.Columns, + ID: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: meta.FieldID, + }, + }, + } + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Meta.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID) + for _, f := range fields { + if !meta.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != meta.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.CreatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldCreatedAt, + }) + } + if muo.mutation.CreatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: meta.FieldCreatedAt, + }) + } + if value, ok := muo.mutation.UpdatedAt(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Value: value, + Column: meta.FieldUpdatedAt, + }) + } + if muo.mutation.UpdatedAtCleared() { + _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ + Type: field.TypeTime, + Column: meta.FieldUpdatedAt, + }) + } + if value, ok := muo.mutation.Key(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldKey, + }) + } + if value, ok := muo.mutation.Value(); ok { + _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ + Type: field.TypeString, + Value: value, + Column: meta.FieldValue, + }) + } + if muo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: meta.OwnerTable, + Columns: []string{meta.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: &sqlgraph.FieldSpec{ + Type: field.TypeInt, + Column: alert.FieldID, + }, + }, + } + for _, k := range nodes { + 
edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Meta{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{meta.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} diff --git a/pkg/database/ent/migrate/migrate.go b/pkg/database/ent/migrate/migrate.go new file mode 100644 index 0000000..1956a6b --- /dev/null +++ b/pkg/database/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go new file mode 100644 index 0000000..b6def08 --- /dev/null +++ b/pkg/database/ent/migrate/schema.go @@ -0,0 +1,214 @@ +// Code generated by ent, DO NOT EDIT. 
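+
+// Applying the tables below is typically done at startup through the Schema
+// client defined in migrate.go (a sketch, assuming an opened *ent.Client
+// named "client"):
+//
+//	if err := client.Schema.Create(ctx, migrate.WithDropIndex(true), migrate.WithDropColumn(true)); err != nil {
+//		log.Fatalf("creating schema resources: %v", err)
+//	}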
+ +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // AlertsColumns holds the columns for the "alerts" table. + AlertsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "scenario", Type: field.TypeString}, + {Name: "bucket_id", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "message", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "events_count", Type: field.TypeInt32, Nullable: true, Default: 0}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "stopped_at", Type: field.TypeTime, Nullable: true}, + {Name: "source_ip", Type: field.TypeString, Nullable: true}, + {Name: "source_range", Type: field.TypeString, Nullable: true}, + {Name: "source_as_number", Type: field.TypeString, Nullable: true}, + {Name: "source_as_name", Type: field.TypeString, Nullable: true}, + {Name: "source_country", Type: field.TypeString, Nullable: true}, + {Name: "source_latitude", Type: field.TypeFloat32, Nullable: true}, + {Name: "source_longitude", Type: field.TypeFloat32, Nullable: true}, + {Name: "source_scope", Type: field.TypeString, Nullable: true}, + {Name: "source_value", Type: field.TypeString, Nullable: true}, + {Name: "capacity", Type: field.TypeInt32, Nullable: true}, + {Name: "leak_speed", Type: field.TypeString, Nullable: true}, + {Name: "scenario_version", Type: field.TypeString, Nullable: true}, + {Name: "scenario_hash", Type: field.TypeString, Nullable: true}, + {Name: "simulated", Type: field.TypeBool, Default: false}, + {Name: "machine_alerts", Type: field.TypeInt, Nullable: true}, + } + // AlertsTable holds the schema information for the "alerts" table. + AlertsTable = &schema.Table{ + Name: "alerts", + Columns: AlertsColumns, + PrimaryKey: []*schema.Column{AlertsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "alerts_machines_alerts", + Columns: []*schema.Column{AlertsColumns[23]}, + RefColumns: []*schema.Column{MachinesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "alert_id", + Unique: false, + Columns: []*schema.Column{AlertsColumns[0]}, + }, + }, + } + // BouncersColumns holds the columns for the "bouncers" table. + BouncersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "name", Type: field.TypeString, Unique: true}, + {Name: "api_key", Type: field.TypeString}, + {Name: "revoked", Type: field.TypeBool}, + {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, + {Name: "type", Type: field.TypeString, Nullable: true}, + {Name: "version", Type: field.TypeString, Nullable: true}, + {Name: "until", Type: field.TypeTime, Nullable: true}, + {Name: "last_pull", Type: field.TypeTime}, + {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, + } + // BouncersTable holds the schema information for the "bouncers" table. + BouncersTable = &schema.Table{ + Name: "bouncers", + Columns: BouncersColumns, + PrimaryKey: []*schema.Column{BouncersColumns[0]}, + } + // DecisionsColumns holds the columns for the "decisions" table. 
+ DecisionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}}, + {Name: "scenario", Type: field.TypeString}, + {Name: "type", Type: field.TypeString}, + {Name: "start_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "end_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "start_suffix", Type: field.TypeInt64, Nullable: true}, + {Name: "end_suffix", Type: field.TypeInt64, Nullable: true}, + {Name: "ip_size", Type: field.TypeInt64, Nullable: true}, + {Name: "scope", Type: field.TypeString}, + {Name: "value", Type: field.TypeString}, + {Name: "origin", Type: field.TypeString}, + {Name: "simulated", Type: field.TypeBool, Default: false}, + {Name: "alert_decisions", Type: field.TypeInt, Nullable: true}, + } + // DecisionsTable holds the schema information for the "decisions" table. + DecisionsTable = &schema.Table{ + Name: "decisions", + Columns: DecisionsColumns, + PrimaryKey: []*schema.Column{DecisionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "decisions_alerts_decisions", + Columns: []*schema.Column{DecisionsColumns[15]}, + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "decision_start_ip_end_ip", + Unique: false, + Columns: []*schema.Column{DecisionsColumns[6], DecisionsColumns[7]}, + }, + { + Name: "decision_value", + Unique: false, + Columns: []*schema.Column{DecisionsColumns[12]}, + }, + { + Name: "decision_until", + Unique: false, + Columns: []*schema.Column{DecisionsColumns[3]}, + }, + }, + } + // EventsColumns holds the columns for the "events" table. + EventsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "time", Type: field.TypeTime}, + {Name: "serialized", Type: field.TypeString, Size: 8191}, + {Name: "alert_events", Type: field.TypeInt, Nullable: true}, + } + // EventsTable holds the schema information for the "events" table. + EventsTable = &schema.Table{ + Name: "events", + Columns: EventsColumns, + PrimaryKey: []*schema.Column{EventsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "events_alerts_events", + Columns: []*schema.Column{EventsColumns[5]}, + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // MachinesColumns holds the columns for the "machines" table. 
+ MachinesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "last_push", Type: field.TypeTime, Nullable: true}, + {Name: "last_heartbeat", Type: field.TypeTime, Nullable: true}, + {Name: "machine_id", Type: field.TypeString, Unique: true}, + {Name: "password", Type: field.TypeString}, + {Name: "ip_address", Type: field.TypeString}, + {Name: "scenarios", Type: field.TypeString, Nullable: true, Size: 4095}, + {Name: "version", Type: field.TypeString, Nullable: true}, + {Name: "is_validated", Type: field.TypeBool, Default: false}, + {Name: "status", Type: field.TypeString, Nullable: true}, + {Name: "auth_type", Type: field.TypeString, Default: "password"}, + } + // MachinesTable holds the schema information for the "machines" table. + MachinesTable = &schema.Table{ + Name: "machines", + Columns: MachinesColumns, + PrimaryKey: []*schema.Column{MachinesColumns[0]}, + } + // MetaColumns holds the columns for the "meta" table. + MetaColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime, Nullable: true}, + {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "key", Type: field.TypeString}, + {Name: "value", Type: field.TypeString, Size: 4095}, + {Name: "alert_metas", Type: field.TypeInt, Nullable: true}, + } + // MetaTable holds the schema information for the "meta" table. + MetaTable = &schema.Table{ + Name: "meta", + Columns: MetaColumns, + PrimaryKey: []*schema.Column{MetaColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "meta_alerts_metas", + Columns: []*schema.Column{MetaColumns[5]}, + RefColumns: []*schema.Column{AlertsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + AlertsTable, + BouncersTable, + DecisionsTable, + EventsTable, + MachinesTable, + MetaTable, + } +) + +func init() { + AlertsTable.ForeignKeys[0].RefTable = MachinesTable + DecisionsTable.ForeignKeys[0].RefTable = AlertsTable + EventsTable.ForeignKeys[0].RefTable = AlertsTable + MetaTable.ForeignKeys[0].RefTable = AlertsTable +} diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go new file mode 100644 index 0000000..646c078 --- /dev/null +++ b/pkg/database/ent/mutation.go @@ -0,0 +1,6995 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + + "entgo.io/ent" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeAlert = "Alert" + TypeBouncer = "Bouncer" + TypeDecision = "Decision" + TypeEvent = "Event" + TypeMachine = "Machine" + TypeMeta = "Meta" +) + +// AlertMutation represents an operation that mutates the Alert nodes in the graph. 
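+//
+// Mutations are the values hooks intercept; a minimal logging hook sketch
+// (assuming the generated "hook" helper package and a caller-side "ent"
+// import):
+//
+//	client.Alert.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.AlertFunc(func(ctx context.Context, m *ent.AlertMutation) (ent.Value, error) {
+//			log.Printf("alert mutation: op=%v", m.Op())
+//			return next.Mutate(ctx, m)
+//		})
+//	})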
+type AlertMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + scenario *string + bucketId *string + message *string + eventsCount *int32 + addeventsCount *int32 + startedAt *time.Time + stoppedAt *time.Time + sourceIp *string + sourceRange *string + sourceAsNumber *string + sourceAsName *string + sourceCountry *string + sourceLatitude *float32 + addsourceLatitude *float32 + sourceLongitude *float32 + addsourceLongitude *float32 + sourceScope *string + sourceValue *string + capacity *int32 + addcapacity *int32 + leakSpeed *string + scenarioVersion *string + scenarioHash *string + simulated *bool + clearedFields map[string]struct{} + owner *int + clearedowner bool + decisions map[int]struct{} + removeddecisions map[int]struct{} + cleareddecisions bool + events map[int]struct{} + removedevents map[int]struct{} + clearedevents bool + metas map[int]struct{} + removedmetas map[int]struct{} + clearedmetas bool + done bool + oldValue func(context.Context) (*Alert, error) + predicates []predicate.Alert +} + +var _ ent.Mutation = (*AlertMutation)(nil) + +// alertOption allows management of the mutation configuration using functional options. +type alertOption func(*AlertMutation) + +// newAlertMutation creates new mutation for the Alert entity. +func newAlertMutation(c config, op Op, opts ...alertOption) *AlertMutation { + m := &AlertMutation{ + config: c, + op: op, + typ: TypeAlert, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAlertID sets the ID field of the mutation. +func withAlertID(id int) alertOption { + return func(m *AlertMutation) { + var ( + err error + once sync.Once + value *Alert + ) + m.oldValue = func(ctx context.Context) (*Alert, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Alert.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAlert sets the old Alert of the mutation. +func withAlert(node *Alert) alertOption { + return func(m *AlertMutation) { + m.oldValue = func(context.Context) (*Alert, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AlertMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AlertMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AlertMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *AlertMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Alert.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *AlertMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AlertMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (m *AlertMutation) ClearCreatedAt() { + m.created_at = nil + m.clearedFields[alert.FieldCreatedAt] = struct{}{} +} + +// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. +func (m *AlertMutation) CreatedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldCreatedAt] + return ok +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AlertMutation) ResetCreatedAt() { + m.created_at = nil + delete(m.clearedFields, alert.FieldCreatedAt) +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AlertMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AlertMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *AlertMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[alert.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. 
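+//
+// Together with UpdatedAt and OldUpdatedAt, this lets an update hook tell
+// "set", "cleared", and "untouched" apart (sketch):
+//
+//	if m.UpdatedAtCleared() {
+//		// updated_at is being cleared (set to NULL)
+//	} else if v, ok := m.UpdatedAt(); ok {
+//		_ = v // updated_at is being set to v
+//	}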
+func (m *AlertMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AlertMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, alert.FieldUpdatedAt) +} + +// SetScenario sets the "scenario" field. +func (m *AlertMutation) SetScenario(s string) { + m.scenario = &s +} + +// Scenario returns the value of the "scenario" field in the mutation. +func (m *AlertMutation) Scenario() (r string, exists bool) { + v := m.scenario + if v == nil { + return + } + return *v, true +} + +// OldScenario returns the old "scenario" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldScenario(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScenario is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScenario requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenario: %w", err) + } + return oldValue.Scenario, nil +} + +// ResetScenario resets all changes to the "scenario" field. +func (m *AlertMutation) ResetScenario() { + m.scenario = nil +} + +// SetBucketId sets the "bucketId" field. +func (m *AlertMutation) SetBucketId(s string) { + m.bucketId = &s +} + +// BucketId returns the value of the "bucketId" field in the mutation. +func (m *AlertMutation) BucketId() (r string, exists bool) { + v := m.bucketId + if v == nil { + return + } + return *v, true +} + +// OldBucketId returns the old "bucketId" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldBucketId(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBucketId is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBucketId requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBucketId: %w", err) + } + return oldValue.BucketId, nil +} + +// ClearBucketId clears the value of the "bucketId" field. +func (m *AlertMutation) ClearBucketId() { + m.bucketId = nil + m.clearedFields[alert.FieldBucketId] = struct{}{} +} + +// BucketIdCleared returns if the "bucketId" field was cleared in this mutation. +func (m *AlertMutation) BucketIdCleared() bool { + _, ok := m.clearedFields[alert.FieldBucketId] + return ok +} + +// ResetBucketId resets all changes to the "bucketId" field. +func (m *AlertMutation) ResetBucketId() { + m.bucketId = nil + delete(m.clearedFields, alert.FieldBucketId) +} + +// SetMessage sets the "message" field. +func (m *AlertMutation) SetMessage(s string) { + m.message = &s +} + +// Message returns the value of the "message" field in the mutation. +func (m *AlertMutation) Message() (r string, exists bool) { + v := m.message + if v == nil { + return + } + return *v, true +} + +// OldMessage returns the old "message" field's value of the Alert entity. 
+// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMessage: %w", err) + } + return oldValue.Message, nil +} + +// ClearMessage clears the value of the "message" field. +func (m *AlertMutation) ClearMessage() { + m.message = nil + m.clearedFields[alert.FieldMessage] = struct{}{} +} + +// MessageCleared returns if the "message" field was cleared in this mutation. +func (m *AlertMutation) MessageCleared() bool { + _, ok := m.clearedFields[alert.FieldMessage] + return ok +} + +// ResetMessage resets all changes to the "message" field. +func (m *AlertMutation) ResetMessage() { + m.message = nil + delete(m.clearedFields, alert.FieldMessage) +} + +// SetEventsCount sets the "eventsCount" field. +func (m *AlertMutation) SetEventsCount(i int32) { + m.eventsCount = &i + m.addeventsCount = nil +} + +// EventsCount returns the value of the "eventsCount" field in the mutation. +func (m *AlertMutation) EventsCount() (r int32, exists bool) { + v := m.eventsCount + if v == nil { + return + } + return *v, true +} + +// OldEventsCount returns the old "eventsCount" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldEventsCount(ctx context.Context) (v int32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEventsCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEventsCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEventsCount: %w", err) + } + return oldValue.EventsCount, nil +} + +// AddEventsCount adds i to the "eventsCount" field. +func (m *AlertMutation) AddEventsCount(i int32) { + if m.addeventsCount != nil { + *m.addeventsCount += i + } else { + m.addeventsCount = &i + } +} + +// AddedEventsCount returns the value that was added to the "eventsCount" field in this mutation. +func (m *AlertMutation) AddedEventsCount() (r int32, exists bool) { + v := m.addeventsCount + if v == nil { + return + } + return *v, true +} + +// ClearEventsCount clears the value of the "eventsCount" field. +func (m *AlertMutation) ClearEventsCount() { + m.eventsCount = nil + m.addeventsCount = nil + m.clearedFields[alert.FieldEventsCount] = struct{}{} +} + +// EventsCountCleared returns if the "eventsCount" field was cleared in this mutation. +func (m *AlertMutation) EventsCountCleared() bool { + _, ok := m.clearedFields[alert.FieldEventsCount] + return ok +} + +// ResetEventsCount resets all changes to the "eventsCount" field. +func (m *AlertMutation) ResetEventsCount() { + m.eventsCount = nil + m.addeventsCount = nil + delete(m.clearedFields, alert.FieldEventsCount) +} + +// SetStartedAt sets the "startedAt" field. 
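+
+// [Editor's note — illustrative sketch, not part of the generated file.]
+// Numeric fields such as eventsCount carry a second pointer (addeventsCount)
+// holding a pending delta: SetEventsCount discards any pending delta, while
+// repeated AddEventsCount calls accumulate into it. At save time this lets
+// the builder emit either an absolute assignment or a relative
+// "events_count = events_count + delta" update. Assuming an *ent.Client
+// named client, the two builder styles look like:
+//
+//	// absolute: events_count = 10
+//	err := client.Alert.UpdateOneID(id).SetEventsCount(10).Exec(ctx)
+//	// relative: events_count = events_count + 5
+//	err = client.Alert.UpdateOneID(id).AddEventsCount(5).Exec(ctx)
+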
+func (m *AlertMutation) SetStartedAt(t time.Time) { + m.startedAt = &t +} + +// StartedAt returns the value of the "startedAt" field in the mutation. +func (m *AlertMutation) StartedAt() (r time.Time, exists bool) { + v := m.startedAt + if v == nil { + return + } + return *v, true +} + +// OldStartedAt returns the old "startedAt" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) + } + return oldValue.StartedAt, nil +} + +// ClearStartedAt clears the value of the "startedAt" field. +func (m *AlertMutation) ClearStartedAt() { + m.startedAt = nil + m.clearedFields[alert.FieldStartedAt] = struct{}{} +} + +// StartedAtCleared returns if the "startedAt" field was cleared in this mutation. +func (m *AlertMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldStartedAt] + return ok +} + +// ResetStartedAt resets all changes to the "startedAt" field. +func (m *AlertMutation) ResetStartedAt() { + m.startedAt = nil + delete(m.clearedFields, alert.FieldStartedAt) +} + +// SetStoppedAt sets the "stoppedAt" field. +func (m *AlertMutation) SetStoppedAt(t time.Time) { + m.stoppedAt = &t +} + +// StoppedAt returns the value of the "stoppedAt" field in the mutation. +func (m *AlertMutation) StoppedAt() (r time.Time, exists bool) { + v := m.stoppedAt + if v == nil { + return + } + return *v, true +} + +// OldStoppedAt returns the old "stoppedAt" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldStoppedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStoppedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStoppedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStoppedAt: %w", err) + } + return oldValue.StoppedAt, nil +} + +// ClearStoppedAt clears the value of the "stoppedAt" field. +func (m *AlertMutation) ClearStoppedAt() { + m.stoppedAt = nil + m.clearedFields[alert.FieldStoppedAt] = struct{}{} +} + +// StoppedAtCleared returns if the "stoppedAt" field was cleared in this mutation. +func (m *AlertMutation) StoppedAtCleared() bool { + _, ok := m.clearedFields[alert.FieldStoppedAt] + return ok +} + +// ResetStoppedAt resets all changes to the "stoppedAt" field. +func (m *AlertMutation) ResetStoppedAt() { + m.stoppedAt = nil + delete(m.clearedFields, alert.FieldStoppedAt) +} + +// SetSourceIp sets the "sourceIp" field. +func (m *AlertMutation) SetSourceIp(s string) { + m.sourceIp = &s +} + +// SourceIp returns the value of the "sourceIp" field in the mutation. 
+func (m *AlertMutation) SourceIp() (r string, exists bool) { + v := m.sourceIp + if v == nil { + return + } + return *v, true +} + +// OldSourceIp returns the old "sourceIp" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceIp(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceIp is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceIp requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceIp: %w", err) + } + return oldValue.SourceIp, nil +} + +// ClearSourceIp clears the value of the "sourceIp" field. +func (m *AlertMutation) ClearSourceIp() { + m.sourceIp = nil + m.clearedFields[alert.FieldSourceIp] = struct{}{} +} + +// SourceIpCleared returns if the "sourceIp" field was cleared in this mutation. +func (m *AlertMutation) SourceIpCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceIp] + return ok +} + +// ResetSourceIp resets all changes to the "sourceIp" field. +func (m *AlertMutation) ResetSourceIp() { + m.sourceIp = nil + delete(m.clearedFields, alert.FieldSourceIp) +} + +// SetSourceRange sets the "sourceRange" field. +func (m *AlertMutation) SetSourceRange(s string) { + m.sourceRange = &s +} + +// SourceRange returns the value of the "sourceRange" field in the mutation. +func (m *AlertMutation) SourceRange() (r string, exists bool) { + v := m.sourceRange + if v == nil { + return + } + return *v, true +} + +// OldSourceRange returns the old "sourceRange" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceRange(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceRange is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceRange requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceRange: %w", err) + } + return oldValue.SourceRange, nil +} + +// ClearSourceRange clears the value of the "sourceRange" field. +func (m *AlertMutation) ClearSourceRange() { + m.sourceRange = nil + m.clearedFields[alert.FieldSourceRange] = struct{}{} +} + +// SourceRangeCleared returns if the "sourceRange" field was cleared in this mutation. +func (m *AlertMutation) SourceRangeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceRange] + return ok +} + +// ResetSourceRange resets all changes to the "sourceRange" field. +func (m *AlertMutation) ResetSourceRange() { + m.sourceRange = nil + delete(m.clearedFields, alert.FieldSourceRange) +} + +// SetSourceAsNumber sets the "sourceAsNumber" field. +func (m *AlertMutation) SetSourceAsNumber(s string) { + m.sourceAsNumber = &s +} + +// SourceAsNumber returns the value of the "sourceAsNumber" field in the mutation. 
+func (m *AlertMutation) SourceAsNumber() (r string, exists bool) { + v := m.sourceAsNumber + if v == nil { + return + } + return *v, true +} + +// OldSourceAsNumber returns the old "sourceAsNumber" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceAsNumber(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceAsNumber is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceAsNumber requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceAsNumber: %w", err) + } + return oldValue.SourceAsNumber, nil +} + +// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. +func (m *AlertMutation) ClearSourceAsNumber() { + m.sourceAsNumber = nil + m.clearedFields[alert.FieldSourceAsNumber] = struct{}{} +} + +// SourceAsNumberCleared returns if the "sourceAsNumber" field was cleared in this mutation. +func (m *AlertMutation) SourceAsNumberCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceAsNumber] + return ok +} + +// ResetSourceAsNumber resets all changes to the "sourceAsNumber" field. +func (m *AlertMutation) ResetSourceAsNumber() { + m.sourceAsNumber = nil + delete(m.clearedFields, alert.FieldSourceAsNumber) +} + +// SetSourceAsName sets the "sourceAsName" field. +func (m *AlertMutation) SetSourceAsName(s string) { + m.sourceAsName = &s +} + +// SourceAsName returns the value of the "sourceAsName" field in the mutation. +func (m *AlertMutation) SourceAsName() (r string, exists bool) { + v := m.sourceAsName + if v == nil { + return + } + return *v, true +} + +// OldSourceAsName returns the old "sourceAsName" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceAsName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceAsName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceAsName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceAsName: %w", err) + } + return oldValue.SourceAsName, nil +} + +// ClearSourceAsName clears the value of the "sourceAsName" field. +func (m *AlertMutation) ClearSourceAsName() { + m.sourceAsName = nil + m.clearedFields[alert.FieldSourceAsName] = struct{}{} +} + +// SourceAsNameCleared returns if the "sourceAsName" field was cleared in this mutation. +func (m *AlertMutation) SourceAsNameCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceAsName] + return ok +} + +// ResetSourceAsName resets all changes to the "sourceAsName" field. +func (m *AlertMutation) ResetSourceAsName() { + m.sourceAsName = nil + delete(m.clearedFields, alert.FieldSourceAsName) +} + +// SetSourceCountry sets the "sourceCountry" field. +func (m *AlertMutation) SetSourceCountry(s string) { + m.sourceCountry = &s +} + +// SourceCountry returns the value of the "sourceCountry" field in the mutation. 
+func (m *AlertMutation) SourceCountry() (r string, exists bool) { + v := m.sourceCountry + if v == nil { + return + } + return *v, true +} + +// OldSourceCountry returns the old "sourceCountry" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceCountry(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceCountry is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceCountry requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceCountry: %w", err) + } + return oldValue.SourceCountry, nil +} + +// ClearSourceCountry clears the value of the "sourceCountry" field. +func (m *AlertMutation) ClearSourceCountry() { + m.sourceCountry = nil + m.clearedFields[alert.FieldSourceCountry] = struct{}{} +} + +// SourceCountryCleared returns if the "sourceCountry" field was cleared in this mutation. +func (m *AlertMutation) SourceCountryCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceCountry] + return ok +} + +// ResetSourceCountry resets all changes to the "sourceCountry" field. +func (m *AlertMutation) ResetSourceCountry() { + m.sourceCountry = nil + delete(m.clearedFields, alert.FieldSourceCountry) +} + +// SetSourceLatitude sets the "sourceLatitude" field. +func (m *AlertMutation) SetSourceLatitude(f float32) { + m.sourceLatitude = &f + m.addsourceLatitude = nil +} + +// SourceLatitude returns the value of the "sourceLatitude" field in the mutation. +func (m *AlertMutation) SourceLatitude() (r float32, exists bool) { + v := m.sourceLatitude + if v == nil { + return + } + return *v, true +} + +// OldSourceLatitude returns the old "sourceLatitude" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceLatitude(ctx context.Context) (v float32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceLatitude is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceLatitude requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceLatitude: %w", err) + } + return oldValue.SourceLatitude, nil +} + +// AddSourceLatitude adds f to the "sourceLatitude" field. +func (m *AlertMutation) AddSourceLatitude(f float32) { + if m.addsourceLatitude != nil { + *m.addsourceLatitude += f + } else { + m.addsourceLatitude = &f + } +} + +// AddedSourceLatitude returns the value that was added to the "sourceLatitude" field in this mutation. +func (m *AlertMutation) AddedSourceLatitude() (r float32, exists bool) { + v := m.addsourceLatitude + if v == nil { + return + } + return *v, true +} + +// ClearSourceLatitude clears the value of the "sourceLatitude" field. +func (m *AlertMutation) ClearSourceLatitude() { + m.sourceLatitude = nil + m.addsourceLatitude = nil + m.clearedFields[alert.FieldSourceLatitude] = struct{}{} +} + +// SourceLatitudeCleared returns if the "sourceLatitude" field was cleared in this mutation. 
+func (m *AlertMutation) SourceLatitudeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceLatitude] + return ok +} + +// ResetSourceLatitude resets all changes to the "sourceLatitude" field. +func (m *AlertMutation) ResetSourceLatitude() { + m.sourceLatitude = nil + m.addsourceLatitude = nil + delete(m.clearedFields, alert.FieldSourceLatitude) +} + +// SetSourceLongitude sets the "sourceLongitude" field. +func (m *AlertMutation) SetSourceLongitude(f float32) { + m.sourceLongitude = &f + m.addsourceLongitude = nil +} + +// SourceLongitude returns the value of the "sourceLongitude" field in the mutation. +func (m *AlertMutation) SourceLongitude() (r float32, exists bool) { + v := m.sourceLongitude + if v == nil { + return + } + return *v, true +} + +// OldSourceLongitude returns the old "sourceLongitude" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceLongitude(ctx context.Context) (v float32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceLongitude is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceLongitude requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceLongitude: %w", err) + } + return oldValue.SourceLongitude, nil +} + +// AddSourceLongitude adds f to the "sourceLongitude" field. +func (m *AlertMutation) AddSourceLongitude(f float32) { + if m.addsourceLongitude != nil { + *m.addsourceLongitude += f + } else { + m.addsourceLongitude = &f + } +} + +// AddedSourceLongitude returns the value that was added to the "sourceLongitude" field in this mutation. +func (m *AlertMutation) AddedSourceLongitude() (r float32, exists bool) { + v := m.addsourceLongitude + if v == nil { + return + } + return *v, true +} + +// ClearSourceLongitude clears the value of the "sourceLongitude" field. +func (m *AlertMutation) ClearSourceLongitude() { + m.sourceLongitude = nil + m.addsourceLongitude = nil + m.clearedFields[alert.FieldSourceLongitude] = struct{}{} +} + +// SourceLongitudeCleared returns if the "sourceLongitude" field was cleared in this mutation. +func (m *AlertMutation) SourceLongitudeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceLongitude] + return ok +} + +// ResetSourceLongitude resets all changes to the "sourceLongitude" field. +func (m *AlertMutation) ResetSourceLongitude() { + m.sourceLongitude = nil + m.addsourceLongitude = nil + delete(m.clearedFields, alert.FieldSourceLongitude) +} + +// SetSourceScope sets the "sourceScope" field. +func (m *AlertMutation) SetSourceScope(s string) { + m.sourceScope = &s +} + +// SourceScope returns the value of the "sourceScope" field in the mutation. +func (m *AlertMutation) SourceScope() (r string, exists bool) { + v := m.sourceScope + if v == nil { + return + } + return *v, true +} + +// OldSourceScope returns the old "sourceScope" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AlertMutation) OldSourceScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceScope is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceScope: %w", err) + } + return oldValue.SourceScope, nil +} + +// ClearSourceScope clears the value of the "sourceScope" field. +func (m *AlertMutation) ClearSourceScope() { + m.sourceScope = nil + m.clearedFields[alert.FieldSourceScope] = struct{}{} +} + +// SourceScopeCleared returns if the "sourceScope" field was cleared in this mutation. +func (m *AlertMutation) SourceScopeCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceScope] + return ok +} + +// ResetSourceScope resets all changes to the "sourceScope" field. +func (m *AlertMutation) ResetSourceScope() { + m.sourceScope = nil + delete(m.clearedFields, alert.FieldSourceScope) +} + +// SetSourceValue sets the "sourceValue" field. +func (m *AlertMutation) SetSourceValue(s string) { + m.sourceValue = &s +} + +// SourceValue returns the value of the "sourceValue" field in the mutation. +func (m *AlertMutation) SourceValue() (r string, exists bool) { + v := m.sourceValue + if v == nil { + return + } + return *v, true +} + +// OldSourceValue returns the old "sourceValue" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldSourceValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceValue: %w", err) + } + return oldValue.SourceValue, nil +} + +// ClearSourceValue clears the value of the "sourceValue" field. +func (m *AlertMutation) ClearSourceValue() { + m.sourceValue = nil + m.clearedFields[alert.FieldSourceValue] = struct{}{} +} + +// SourceValueCleared returns if the "sourceValue" field was cleared in this mutation. +func (m *AlertMutation) SourceValueCleared() bool { + _, ok := m.clearedFields[alert.FieldSourceValue] + return ok +} + +// ResetSourceValue resets all changes to the "sourceValue" field. +func (m *AlertMutation) ResetSourceValue() { + m.sourceValue = nil + delete(m.clearedFields, alert.FieldSourceValue) +} + +// SetCapacity sets the "capacity" field. +func (m *AlertMutation) SetCapacity(i int32) { + m.capacity = &i + m.addcapacity = nil +} + +// Capacity returns the value of the "capacity" field in the mutation. +func (m *AlertMutation) Capacity() (r int32, exists bool) { + v := m.capacity + if v == nil { + return + } + return *v, true +} + +// OldCapacity returns the old "capacity" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AlertMutation) OldCapacity(ctx context.Context) (v int32, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCapacity is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCapacity requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCapacity: %w", err) + } + return oldValue.Capacity, nil +} + +// AddCapacity adds i to the "capacity" field. +func (m *AlertMutation) AddCapacity(i int32) { + if m.addcapacity != nil { + *m.addcapacity += i + } else { + m.addcapacity = &i + } +} + +// AddedCapacity returns the value that was added to the "capacity" field in this mutation. +func (m *AlertMutation) AddedCapacity() (r int32, exists bool) { + v := m.addcapacity + if v == nil { + return + } + return *v, true +} + +// ClearCapacity clears the value of the "capacity" field. +func (m *AlertMutation) ClearCapacity() { + m.capacity = nil + m.addcapacity = nil + m.clearedFields[alert.FieldCapacity] = struct{}{} +} + +// CapacityCleared returns if the "capacity" field was cleared in this mutation. +func (m *AlertMutation) CapacityCleared() bool { + _, ok := m.clearedFields[alert.FieldCapacity] + return ok +} + +// ResetCapacity resets all changes to the "capacity" field. +func (m *AlertMutation) ResetCapacity() { + m.capacity = nil + m.addcapacity = nil + delete(m.clearedFields, alert.FieldCapacity) +} + +// SetLeakSpeed sets the "leakSpeed" field. +func (m *AlertMutation) SetLeakSpeed(s string) { + m.leakSpeed = &s +} + +// LeakSpeed returns the value of the "leakSpeed" field in the mutation. +func (m *AlertMutation) LeakSpeed() (r string, exists bool) { + v := m.leakSpeed + if v == nil { + return + } + return *v, true +} + +// OldLeakSpeed returns the old "leakSpeed" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldLeakSpeed(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLeakSpeed is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLeakSpeed requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLeakSpeed: %w", err) + } + return oldValue.LeakSpeed, nil +} + +// ClearLeakSpeed clears the value of the "leakSpeed" field. +func (m *AlertMutation) ClearLeakSpeed() { + m.leakSpeed = nil + m.clearedFields[alert.FieldLeakSpeed] = struct{}{} +} + +// LeakSpeedCleared returns if the "leakSpeed" field was cleared in this mutation. +func (m *AlertMutation) LeakSpeedCleared() bool { + _, ok := m.clearedFields[alert.FieldLeakSpeed] + return ok +} + +// ResetLeakSpeed resets all changes to the "leakSpeed" field. +func (m *AlertMutation) ResetLeakSpeed() { + m.leakSpeed = nil + delete(m.clearedFields, alert.FieldLeakSpeed) +} + +// SetScenarioVersion sets the "scenarioVersion" field. +func (m *AlertMutation) SetScenarioVersion(s string) { + m.scenarioVersion = &s +} + +// ScenarioVersion returns the value of the "scenarioVersion" field in the mutation. 
+func (m *AlertMutation) ScenarioVersion() (r string, exists bool) { + v := m.scenarioVersion + if v == nil { + return + } + return *v, true +} + +// OldScenarioVersion returns the old "scenarioVersion" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldScenarioVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScenarioVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScenarioVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarioVersion: %w", err) + } + return oldValue.ScenarioVersion, nil +} + +// ClearScenarioVersion clears the value of the "scenarioVersion" field. +func (m *AlertMutation) ClearScenarioVersion() { + m.scenarioVersion = nil + m.clearedFields[alert.FieldScenarioVersion] = struct{}{} +} + +// ScenarioVersionCleared returns if the "scenarioVersion" field was cleared in this mutation. +func (m *AlertMutation) ScenarioVersionCleared() bool { + _, ok := m.clearedFields[alert.FieldScenarioVersion] + return ok +} + +// ResetScenarioVersion resets all changes to the "scenarioVersion" field. +func (m *AlertMutation) ResetScenarioVersion() { + m.scenarioVersion = nil + delete(m.clearedFields, alert.FieldScenarioVersion) +} + +// SetScenarioHash sets the "scenarioHash" field. +func (m *AlertMutation) SetScenarioHash(s string) { + m.scenarioHash = &s +} + +// ScenarioHash returns the value of the "scenarioHash" field in the mutation. +func (m *AlertMutation) ScenarioHash() (r string, exists bool) { + v := m.scenarioHash + if v == nil { + return + } + return *v, true +} + +// OldScenarioHash returns the old "scenarioHash" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldScenarioHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScenarioHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScenarioHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarioHash: %w", err) + } + return oldValue.ScenarioHash, nil +} + +// ClearScenarioHash clears the value of the "scenarioHash" field. +func (m *AlertMutation) ClearScenarioHash() { + m.scenarioHash = nil + m.clearedFields[alert.FieldScenarioHash] = struct{}{} +} + +// ScenarioHashCleared returns if the "scenarioHash" field was cleared in this mutation. +func (m *AlertMutation) ScenarioHashCleared() bool { + _, ok := m.clearedFields[alert.FieldScenarioHash] + return ok +} + +// ResetScenarioHash resets all changes to the "scenarioHash" field. +func (m *AlertMutation) ResetScenarioHash() { + m.scenarioHash = nil + delete(m.clearedFields, alert.FieldScenarioHash) +} + +// SetSimulated sets the "simulated" field. +func (m *AlertMutation) SetSimulated(b bool) { + m.simulated = &b +} + +// Simulated returns the value of the "simulated" field in the mutation. 
+func (m *AlertMutation) Simulated() (r bool, exists bool) {
+	v := m.simulated
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldSimulated returns the old "simulated" field's value of the Alert entity.
+// If the Alert object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AlertMutation) OldSimulated(ctx context.Context) (v bool, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldSimulated is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldSimulated requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldSimulated: %w", err)
+	}
+	return oldValue.Simulated, nil
+}
+
+// ResetSimulated resets all changes to the "simulated" field.
+func (m *AlertMutation) ResetSimulated() {
+	m.simulated = nil
+}
+
+// SetOwnerID sets the "owner" edge to the Machine entity by id.
+func (m *AlertMutation) SetOwnerID(id int) {
+	m.owner = &id
+}
+
+// ClearOwner clears the "owner" edge to the Machine entity.
+func (m *AlertMutation) ClearOwner() {
+	m.clearedowner = true
+}
+
+// OwnerCleared reports if the "owner" edge to the Machine entity was cleared.
+func (m *AlertMutation) OwnerCleared() bool {
+	return m.clearedowner
+}
+
+// OwnerID returns the "owner" edge ID in the mutation.
+func (m *AlertMutation) OwnerID() (id int, exists bool) {
+	if m.owner != nil {
+		return *m.owner, true
+	}
+	return
+}
+
+// OwnerIDs returns the "owner" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// OwnerID instead. It exists only for internal usage by the builders.
+func (m *AlertMutation) OwnerIDs() (ids []int) {
+	if id := m.owner; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetOwner resets all changes to the "owner" edge.
+func (m *AlertMutation) ResetOwner() {
+	m.owner = nil
+	m.clearedowner = false
+}
+
+// AddDecisionIDs adds the "decisions" edge to the Decision entity by ids.
+func (m *AlertMutation) AddDecisionIDs(ids ...int) {
+	if m.decisions == nil {
+		m.decisions = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.decisions[ids[i]] = struct{}{}
+	}
+}
+
+// ClearDecisions clears the "decisions" edge to the Decision entity.
+func (m *AlertMutation) ClearDecisions() {
+	m.cleareddecisions = true
+}
+
+// DecisionsCleared reports if the "decisions" edge to the Decision entity was cleared.
+func (m *AlertMutation) DecisionsCleared() bool {
+	return m.cleareddecisions
+}
+
+// RemoveDecisionIDs removes the "decisions" edge to the Decision entity by IDs.
+func (m *AlertMutation) RemoveDecisionIDs(ids ...int) {
+	if m.removeddecisions == nil {
+		m.removeddecisions = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.decisions, ids[i])
+		m.removeddecisions[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedDecisionsIDs returns the removed IDs of the "decisions" edge to the Decision entity.
+func (m *AlertMutation) RemovedDecisionsIDs() (ids []int) {
+	for id := range m.removeddecisions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// DecisionsIDs returns the "decisions" edge IDs in the mutation.
+func (m *AlertMutation) DecisionsIDs() (ids []int) {
+	for id := range m.decisions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetDecisions resets all changes to the "decisions" edge.
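+
+// [Editor's note — illustrative sketch, not part of the generated file.]
+// Edge bookkeeping mirrors the field pattern. The unique "owner" edge is a
+// single *int guarded by the clearedowner flag, whereas the many-sided
+// "decisions" edge keeps two sets: m.decisions for pending additions and
+// m.removeddecisions for pending removals. RemoveDecisionIDs also withdraws
+// the ID from the pending additions, so adding and then removing the same ID
+// within one mutation leaves only the removal recorded:
+//
+//	m.AddDecisionIDs(7)    // m.decisions == {7}
+//	m.RemoveDecisionIDs(7) // m.decisions == {}, m.removeddecisions == {7}
+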
+func (m *AlertMutation) ResetDecisions() {
+	m.decisions = nil
+	m.cleareddecisions = false
+	m.removeddecisions = nil
+}
+
+// AddEventIDs adds the "events" edge to the Event entity by ids.
+func (m *AlertMutation) AddEventIDs(ids ...int) {
+	if m.events == nil {
+		m.events = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.events[ids[i]] = struct{}{}
+	}
+}
+
+// ClearEvents clears the "events" edge to the Event entity.
+func (m *AlertMutation) ClearEvents() {
+	m.clearedevents = true
+}
+
+// EventsCleared reports if the "events" edge to the Event entity was cleared.
+func (m *AlertMutation) EventsCleared() bool {
+	return m.clearedevents
+}
+
+// RemoveEventIDs removes the "events" edge to the Event entity by IDs.
+func (m *AlertMutation) RemoveEventIDs(ids ...int) {
+	if m.removedevents == nil {
+		m.removedevents = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.events, ids[i])
+		m.removedevents[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedEventsIDs returns the removed IDs of the "events" edge to the Event entity.
+func (m *AlertMutation) RemovedEventsIDs() (ids []int) {
+	for id := range m.removedevents {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// EventsIDs returns the "events" edge IDs in the mutation.
+func (m *AlertMutation) EventsIDs() (ids []int) {
+	for id := range m.events {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetEvents resets all changes to the "events" edge.
+func (m *AlertMutation) ResetEvents() {
+	m.events = nil
+	m.clearedevents = false
+	m.removedevents = nil
+}
+
+// AddMetaIDs adds the "metas" edge to the Meta entity by ids.
+func (m *AlertMutation) AddMetaIDs(ids ...int) {
+	if m.metas == nil {
+		m.metas = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.metas[ids[i]] = struct{}{}
+	}
+}
+
+// ClearMetas clears the "metas" edge to the Meta entity.
+func (m *AlertMutation) ClearMetas() {
+	m.clearedmetas = true
+}
+
+// MetasCleared reports if the "metas" edge to the Meta entity was cleared.
+func (m *AlertMutation) MetasCleared() bool {
+	return m.clearedmetas
+}
+
+// RemoveMetaIDs removes the "metas" edge to the Meta entity by IDs.
+func (m *AlertMutation) RemoveMetaIDs(ids ...int) {
+	if m.removedmetas == nil {
+		m.removedmetas = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.metas, ids[i])
+		m.removedmetas[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedMetasIDs returns the removed IDs of the "metas" edge to the Meta entity.
+func (m *AlertMutation) RemovedMetasIDs() (ids []int) {
+	for id := range m.removedmetas {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// MetasIDs returns the "metas" edge IDs in the mutation.
+func (m *AlertMutation) MetasIDs() (ids []int) {
+	for id := range m.metas {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetMetas resets all changes to the "metas" edge.
+func (m *AlertMutation) ResetMetas() {
+	m.metas = nil
+	m.clearedmetas = false
+	m.removedmetas = nil
+}
+
+// Where appends a list of predicates to the AlertMutation builder.
+func (m *AlertMutation) Where(ps ...predicate.Alert) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *AlertMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Alert).
+func (m *AlertMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
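+
+// [Editor's note — illustrative sketch, not part of the generated file.]
+// Where, Op and Type, together with the reflective Fields/Field family
+// below, form the generic ent.Mutation surface: hooks can inspect any
+// mutation without referring to the concrete AlertMutation type. A minimal
+// logging hook built on that surface (imports elided) might look like:
+//
+//	client.Use(func(next ent.Mutator) ent.Mutator {
+//		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+//			log.Printf("%v on %s, changed fields: %v", m.Op(), m.Type(), m.Fields())
+//			return next.Mutate(ctx, m)
+//		})
+//	})
+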
+func (m *AlertMutation) Fields() []string { + fields := make([]string, 0, 22) + if m.created_at != nil { + fields = append(fields, alert.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, alert.FieldUpdatedAt) + } + if m.scenario != nil { + fields = append(fields, alert.FieldScenario) + } + if m.bucketId != nil { + fields = append(fields, alert.FieldBucketId) + } + if m.message != nil { + fields = append(fields, alert.FieldMessage) + } + if m.eventsCount != nil { + fields = append(fields, alert.FieldEventsCount) + } + if m.startedAt != nil { + fields = append(fields, alert.FieldStartedAt) + } + if m.stoppedAt != nil { + fields = append(fields, alert.FieldStoppedAt) + } + if m.sourceIp != nil { + fields = append(fields, alert.FieldSourceIp) + } + if m.sourceRange != nil { + fields = append(fields, alert.FieldSourceRange) + } + if m.sourceAsNumber != nil { + fields = append(fields, alert.FieldSourceAsNumber) + } + if m.sourceAsName != nil { + fields = append(fields, alert.FieldSourceAsName) + } + if m.sourceCountry != nil { + fields = append(fields, alert.FieldSourceCountry) + } + if m.sourceLatitude != nil { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.sourceLongitude != nil { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.sourceScope != nil { + fields = append(fields, alert.FieldSourceScope) + } + if m.sourceValue != nil { + fields = append(fields, alert.FieldSourceValue) + } + if m.capacity != nil { + fields = append(fields, alert.FieldCapacity) + } + if m.leakSpeed != nil { + fields = append(fields, alert.FieldLeakSpeed) + } + if m.scenarioVersion != nil { + fields = append(fields, alert.FieldScenarioVersion) + } + if m.scenarioHash != nil { + fields = append(fields, alert.FieldScenarioHash) + } + if m.simulated != nil { + fields = append(fields, alert.FieldSimulated) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AlertMutation) Field(name string) (ent.Value, bool) { + switch name { + case alert.FieldCreatedAt: + return m.CreatedAt() + case alert.FieldUpdatedAt: + return m.UpdatedAt() + case alert.FieldScenario: + return m.Scenario() + case alert.FieldBucketId: + return m.BucketId() + case alert.FieldMessage: + return m.Message() + case alert.FieldEventsCount: + return m.EventsCount() + case alert.FieldStartedAt: + return m.StartedAt() + case alert.FieldStoppedAt: + return m.StoppedAt() + case alert.FieldSourceIp: + return m.SourceIp() + case alert.FieldSourceRange: + return m.SourceRange() + case alert.FieldSourceAsNumber: + return m.SourceAsNumber() + case alert.FieldSourceAsName: + return m.SourceAsName() + case alert.FieldSourceCountry: + return m.SourceCountry() + case alert.FieldSourceLatitude: + return m.SourceLatitude() + case alert.FieldSourceLongitude: + return m.SourceLongitude() + case alert.FieldSourceScope: + return m.SourceScope() + case alert.FieldSourceValue: + return m.SourceValue() + case alert.FieldCapacity: + return m.Capacity() + case alert.FieldLeakSpeed: + return m.LeakSpeed() + case alert.FieldScenarioVersion: + return m.ScenarioVersion() + case alert.FieldScenarioHash: + return m.ScenarioHash() + case alert.FieldSimulated: + return m.Simulated() + } + return nil, false +} + +// OldField returns the old value of the field from the database. 
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AlertMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case alert.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case alert.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case alert.FieldScenario: + return m.OldScenario(ctx) + case alert.FieldBucketId: + return m.OldBucketId(ctx) + case alert.FieldMessage: + return m.OldMessage(ctx) + case alert.FieldEventsCount: + return m.OldEventsCount(ctx) + case alert.FieldStartedAt: + return m.OldStartedAt(ctx) + case alert.FieldStoppedAt: + return m.OldStoppedAt(ctx) + case alert.FieldSourceIp: + return m.OldSourceIp(ctx) + case alert.FieldSourceRange: + return m.OldSourceRange(ctx) + case alert.FieldSourceAsNumber: + return m.OldSourceAsNumber(ctx) + case alert.FieldSourceAsName: + return m.OldSourceAsName(ctx) + case alert.FieldSourceCountry: + return m.OldSourceCountry(ctx) + case alert.FieldSourceLatitude: + return m.OldSourceLatitude(ctx) + case alert.FieldSourceLongitude: + return m.OldSourceLongitude(ctx) + case alert.FieldSourceScope: + return m.OldSourceScope(ctx) + case alert.FieldSourceValue: + return m.OldSourceValue(ctx) + case alert.FieldCapacity: + return m.OldCapacity(ctx) + case alert.FieldLeakSpeed: + return m.OldLeakSpeed(ctx) + case alert.FieldScenarioVersion: + return m.OldScenarioVersion(ctx) + case alert.FieldScenarioHash: + return m.OldScenarioHash(ctx) + case alert.FieldSimulated: + return m.OldSimulated(ctx) + } + return nil, fmt.Errorf("unknown Alert field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
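+
+// [Editor's note — illustrative sketch, not part of the generated file.]
+// OldField is the reflective counterpart of the typed Old* accessors: on an
+// UpdateOne it lazily loads the pre-mutation row through m.oldValue and
+// returns the stored value for the named field, which is what lets a hook
+// diff old against new by name. Assuming a hook has asserted the concrete
+// type (qualifiers elided), a sketch:
+//
+//	if am, ok := m.(*AlertMutation); ok && am.Op().Is(OpUpdateOne) {
+//		oldV, _ := am.OldField(ctx, alert.FieldScenario)
+//		newV, _ := am.Field(alert.FieldScenario)
+//		log.Printf("scenario: %v -> %v", oldV, newV)
+//	}
+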
+func (m *AlertMutation) SetField(name string, value ent.Value) error { + switch name { + case alert.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case alert.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case alert.FieldScenario: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenario(v) + return nil + case alert.FieldBucketId: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBucketId(v) + return nil + case alert.FieldMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMessage(v) + return nil + case alert.FieldEventsCount: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEventsCount(v) + return nil + case alert.FieldStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartedAt(v) + return nil + case alert.FieldStoppedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStoppedAt(v) + return nil + case alert.FieldSourceIp: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceIp(v) + return nil + case alert.FieldSourceRange: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceRange(v) + return nil + case alert.FieldSourceAsNumber: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceAsNumber(v) + return nil + case alert.FieldSourceAsName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceAsName(v) + return nil + case alert.FieldSourceCountry: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceCountry(v) + return nil + case alert.FieldSourceLatitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceLatitude(v) + return nil + case alert.FieldSourceLongitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceLongitude(v) + return nil + case alert.FieldSourceScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceScope(v) + return nil + case alert.FieldSourceValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceValue(v) + return nil + case alert.FieldCapacity: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCapacity(v) + return nil + case alert.FieldLeakSpeed: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLeakSpeed(v) + return nil + case alert.FieldScenarioVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetScenarioVersion(v) + return nil + case alert.FieldScenarioHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenarioHash(v) + return nil + case alert.FieldSimulated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSimulated(v) + return nil + } + return fmt.Errorf("unknown Alert field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AlertMutation) AddedFields() []string { + var fields []string + if m.addeventsCount != nil { + fields = append(fields, alert.FieldEventsCount) + } + if m.addsourceLatitude != nil { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.addsourceLongitude != nil { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.addcapacity != nil { + fields = append(fields, alert.FieldCapacity) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AlertMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case alert.FieldEventsCount: + return m.AddedEventsCount() + case alert.FieldSourceLatitude: + return m.AddedSourceLatitude() + case alert.FieldSourceLongitude: + return m.AddedSourceLongitude() + case alert.FieldCapacity: + return m.AddedCapacity() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AlertMutation) AddField(name string, value ent.Value) error { + switch name { + case alert.FieldEventsCount: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEventsCount(v) + return nil + case alert.FieldSourceLatitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSourceLatitude(v) + return nil + case alert.FieldSourceLongitude: + v, ok := value.(float32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSourceLongitude(v) + return nil + case alert.FieldCapacity: + v, ok := value.(int32) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCapacity(v) + return nil + } + return fmt.Errorf("unknown Alert numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *AlertMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(alert.FieldCreatedAt) { + fields = append(fields, alert.FieldCreatedAt) + } + if m.FieldCleared(alert.FieldUpdatedAt) { + fields = append(fields, alert.FieldUpdatedAt) + } + if m.FieldCleared(alert.FieldBucketId) { + fields = append(fields, alert.FieldBucketId) + } + if m.FieldCleared(alert.FieldMessage) { + fields = append(fields, alert.FieldMessage) + } + if m.FieldCleared(alert.FieldEventsCount) { + fields = append(fields, alert.FieldEventsCount) + } + if m.FieldCleared(alert.FieldStartedAt) { + fields = append(fields, alert.FieldStartedAt) + } + if m.FieldCleared(alert.FieldStoppedAt) { + fields = append(fields, alert.FieldStoppedAt) + } + if m.FieldCleared(alert.FieldSourceIp) { + fields = append(fields, alert.FieldSourceIp) + } + if m.FieldCleared(alert.FieldSourceRange) { + fields = append(fields, alert.FieldSourceRange) + } + if m.FieldCleared(alert.FieldSourceAsNumber) { + fields = append(fields, alert.FieldSourceAsNumber) + } + if m.FieldCleared(alert.FieldSourceAsName) { + fields = append(fields, alert.FieldSourceAsName) + } + if m.FieldCleared(alert.FieldSourceCountry) { + fields = append(fields, alert.FieldSourceCountry) + } + if m.FieldCleared(alert.FieldSourceLatitude) { + fields = append(fields, alert.FieldSourceLatitude) + } + if m.FieldCleared(alert.FieldSourceLongitude) { + fields = append(fields, alert.FieldSourceLongitude) + } + if m.FieldCleared(alert.FieldSourceScope) { + fields = append(fields, alert.FieldSourceScope) + } + if m.FieldCleared(alert.FieldSourceValue) { + fields = append(fields, alert.FieldSourceValue) + } + if m.FieldCleared(alert.FieldCapacity) { + fields = append(fields, alert.FieldCapacity) + } + if m.FieldCleared(alert.FieldLeakSpeed) { + fields = append(fields, alert.FieldLeakSpeed) + } + if m.FieldCleared(alert.FieldScenarioVersion) { + fields = append(fields, alert.FieldScenarioVersion) + } + if m.FieldCleared(alert.FieldScenarioHash) { + fields = append(fields, alert.FieldScenarioHash) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AlertMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *AlertMutation) ClearField(name string) error { + switch name { + case alert.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case alert.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + case alert.FieldBucketId: + m.ClearBucketId() + return nil + case alert.FieldMessage: + m.ClearMessage() + return nil + case alert.FieldEventsCount: + m.ClearEventsCount() + return nil + case alert.FieldStartedAt: + m.ClearStartedAt() + return nil + case alert.FieldStoppedAt: + m.ClearStoppedAt() + return nil + case alert.FieldSourceIp: + m.ClearSourceIp() + return nil + case alert.FieldSourceRange: + m.ClearSourceRange() + return nil + case alert.FieldSourceAsNumber: + m.ClearSourceAsNumber() + return nil + case alert.FieldSourceAsName: + m.ClearSourceAsName() + return nil + case alert.FieldSourceCountry: + m.ClearSourceCountry() + return nil + case alert.FieldSourceLatitude: + m.ClearSourceLatitude() + return nil + case alert.FieldSourceLongitude: + m.ClearSourceLongitude() + return nil + case alert.FieldSourceScope: + m.ClearSourceScope() + return nil + case alert.FieldSourceValue: + m.ClearSourceValue() + return nil + case alert.FieldCapacity: + m.ClearCapacity() + return nil + case alert.FieldLeakSpeed: + m.ClearLeakSpeed() + return nil + case alert.FieldScenarioVersion: + m.ClearScenarioVersion() + return nil + case alert.FieldScenarioHash: + m.ClearScenarioHash() + return nil + } + return fmt.Errorf("unknown Alert nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AlertMutation) ResetField(name string) error { + switch name { + case alert.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case alert.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case alert.FieldScenario: + m.ResetScenario() + return nil + case alert.FieldBucketId: + m.ResetBucketId() + return nil + case alert.FieldMessage: + m.ResetMessage() + return nil + case alert.FieldEventsCount: + m.ResetEventsCount() + return nil + case alert.FieldStartedAt: + m.ResetStartedAt() + return nil + case alert.FieldStoppedAt: + m.ResetStoppedAt() + return nil + case alert.FieldSourceIp: + m.ResetSourceIp() + return nil + case alert.FieldSourceRange: + m.ResetSourceRange() + return nil + case alert.FieldSourceAsNumber: + m.ResetSourceAsNumber() + return nil + case alert.FieldSourceAsName: + m.ResetSourceAsName() + return nil + case alert.FieldSourceCountry: + m.ResetSourceCountry() + return nil + case alert.FieldSourceLatitude: + m.ResetSourceLatitude() + return nil + case alert.FieldSourceLongitude: + m.ResetSourceLongitude() + return nil + case alert.FieldSourceScope: + m.ResetSourceScope() + return nil + case alert.FieldSourceValue: + m.ResetSourceValue() + return nil + case alert.FieldCapacity: + m.ResetCapacity() + return nil + case alert.FieldLeakSpeed: + m.ResetLeakSpeed() + return nil + case alert.FieldScenarioVersion: + m.ResetScenarioVersion() + return nil + case alert.FieldScenarioHash: + m.ResetScenarioHash() + return nil + case alert.FieldSimulated: + m.ResetSimulated() + return nil + } + return fmt.Errorf("unknown Alert field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *AlertMutation) AddedEdges() []string { + edges := make([]string, 0, 4) + if m.owner != nil { + edges = append(edges, alert.EdgeOwner) + } + if m.decisions != nil { + edges = append(edges, alert.EdgeDecisions) + } + if m.events != nil { + edges = append(edges, alert.EdgeEvents) + } + if m.metas != nil { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AlertMutation) AddedIDs(name string) []ent.Value { + switch name { + case alert.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + case alert.EdgeDecisions: + ids := make([]ent.Value, 0, len(m.decisions)) + for id := range m.decisions { + ids = append(ids, id) + } + return ids + case alert.EdgeEvents: + ids := make([]ent.Value, 0, len(m.events)) + for id := range m.events { + ids = append(ids, id) + } + return ids + case alert.EdgeMetas: + ids := make([]ent.Value, 0, len(m.metas)) + for id := range m.metas { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AlertMutation) RemovedEdges() []string { + edges := make([]string, 0, 4) + if m.removeddecisions != nil { + edges = append(edges, alert.EdgeDecisions) + } + if m.removedevents != nil { + edges = append(edges, alert.EdgeEvents) + } + if m.removedmetas != nil { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AlertMutation) RemovedIDs(name string) []ent.Value { + switch name { + case alert.EdgeDecisions: + ids := make([]ent.Value, 0, len(m.removeddecisions)) + for id := range m.removeddecisions { + ids = append(ids, id) + } + return ids + case alert.EdgeEvents: + ids := make([]ent.Value, 0, len(m.removedevents)) + for id := range m.removedevents { + ids = append(ids, id) + } + return ids + case alert.EdgeMetas: + ids := make([]ent.Value, 0, len(m.removedmetas)) + for id := range m.removedmetas { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AlertMutation) ClearedEdges() []string { + edges := make([]string, 0, 4) + if m.clearedowner { + edges = append(edges, alert.EdgeOwner) + } + if m.cleareddecisions { + edges = append(edges, alert.EdgeDecisions) + } + if m.clearedevents { + edges = append(edges, alert.EdgeEvents) + } + if m.clearedmetas { + edges = append(edges, alert.EdgeMetas) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AlertMutation) EdgeCleared(name string) bool { + switch name { + case alert.EdgeOwner: + return m.clearedowner + case alert.EdgeDecisions: + return m.cleareddecisions + case alert.EdgeEvents: + return m.clearedevents + case alert.EdgeMetas: + return m.clearedmetas + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AlertMutation) ClearEdge(name string) error { + switch name { + case alert.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Alert unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
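+
+// [Editor's note — illustrative sketch, not part of the generated file.]
+// ClearEdge above only accepts the unique "owner" edge; the many-sided
+// decisions/events/metas edges are cleared through their typed Clear*
+// methods instead, so a generic caller must fall back to those:
+//
+//	m.ClearDecisions()                  // typed: valid for a multi edge
+//	err := m.ClearEdge(alert.EdgeOwner) // generic: unique edges only
+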
+func (m *AlertMutation) ResetEdge(name string) error {
+	switch name {
+	case alert.EdgeOwner:
+		m.ResetOwner()
+		return nil
+	case alert.EdgeDecisions:
+		m.ResetDecisions()
+		return nil
+	case alert.EdgeEvents:
+		m.ResetEvents()
+		return nil
+	case alert.EdgeMetas:
+		m.ResetMetas()
+		return nil
+	}
+	return fmt.Errorf("unknown Alert edge %s", name)
+}
+
+// BouncerMutation represents an operation that mutates the Bouncer nodes in the graph.
+type BouncerMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	name          *string
+	api_key       *string
+	revoked       *bool
+	ip_address    *string
+	_type         *string
+	version       *string
+	until         *time.Time
+	last_pull     *time.Time
+	auth_type     *string
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*Bouncer, error)
+	predicates    []predicate.Bouncer
+}
+
+var _ ent.Mutation = (*BouncerMutation)(nil)
+
+// bouncerOption allows management of the mutation configuration using functional options.
+type bouncerOption func(*BouncerMutation)
+
+// newBouncerMutation creates new mutation for the Bouncer entity.
+func newBouncerMutation(c config, op Op, opts ...bouncerOption) *BouncerMutation {
+	m := &BouncerMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeBouncer,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withBouncerID sets the ID field of the mutation.
+func withBouncerID(id int) bouncerOption {
+	return func(m *BouncerMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Bouncer
+		)
+		m.oldValue = func(ctx context.Context) (*Bouncer, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Bouncer.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withBouncer sets the old Bouncer of the mutation.
+func withBouncer(node *Bouncer) bouncerOption {
+	return func(m *BouncerMutation) {
+		m.oldValue = func(context.Context) (*Bouncer, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m BouncerMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m BouncerMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *BouncerMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
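+//
+// Editor's sketch (illustrative only; assumes a predicate-based bulk update
+// built elsewhere, and that the generated bouncer.NameEQ predicate is in
+// scope): resolving which rows the mutation will touch:
+//
+//	m.Where(bouncer.NameEQ("my-bouncer"))
+//	ids, err := m.IDs(ctx)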
+func (m *BouncerMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Bouncer.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *BouncerMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BouncerMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ClearCreatedAt clears the value of the "created_at" field. +func (m *BouncerMutation) ClearCreatedAt() { + m.created_at = nil + m.clearedFields[bouncer.FieldCreatedAt] = struct{}{} +} + +// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. +func (m *BouncerMutation) CreatedAtCleared() bool { + _, ok := m.clearedFields[bouncer.FieldCreatedAt] + return ok +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BouncerMutation) ResetCreatedAt() { + m.created_at = nil + delete(m.clearedFields, bouncer.FieldCreatedAt) +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *BouncerMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *BouncerMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *BouncerMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[bouncer.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. 
+func (m *BouncerMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[bouncer.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *BouncerMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, bouncer.FieldUpdatedAt) +} + +// SetName sets the "name" field. +func (m *BouncerMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *BouncerMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *BouncerMutation) ResetName() { + m.name = nil +} + +// SetAPIKey sets the "api_key" field. +func (m *BouncerMutation) SetAPIKey(s string) { + m.api_key = &s +} + +// APIKey returns the value of the "api_key" field in the mutation. +func (m *BouncerMutation) APIKey() (r string, exists bool) { + v := m.api_key + if v == nil { + return + } + return *v, true +} + +// OldAPIKey returns the old "api_key" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldAPIKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKey: %w", err) + } + return oldValue.APIKey, nil +} + +// ResetAPIKey resets all changes to the "api_key" field. +func (m *BouncerMutation) ResetAPIKey() { + m.api_key = nil +} + +// SetRevoked sets the "revoked" field. +func (m *BouncerMutation) SetRevoked(b bool) { + m.revoked = &b +} + +// Revoked returns the value of the "revoked" field in the mutation. +func (m *BouncerMutation) Revoked() (r bool, exists bool) { + v := m.revoked + if v == nil { + return + } + return *v, true +} + +// OldRevoked returns the old "revoked" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
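+//
+// Editor's sketch (illustrative only; assumes m comes from an UpdateOne
+// hook): detecting a revocation flip by comparing old and staged values:
+//
+//	was, err := m.OldRevoked(ctx)
+//	if err == nil {
+//		if now, ok := m.Revoked(); ok && now && !was {
+//			// the bouncer is being revoked by this mutation
+//		}
+//	}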
+func (m *BouncerMutation) OldRevoked(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRevoked is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRevoked requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRevoked: %w", err) + } + return oldValue.Revoked, nil +} + +// ResetRevoked resets all changes to the "revoked" field. +func (m *BouncerMutation) ResetRevoked() { + m.revoked = nil +} + +// SetIPAddress sets the "ip_address" field. +func (m *BouncerMutation) SetIPAddress(s string) { + m.ip_address = &s +} + +// IPAddress returns the value of the "ip_address" field in the mutation. +func (m *BouncerMutation) IPAddress() (r string, exists bool) { + v := m.ip_address + if v == nil { + return + } + return *v, true +} + +// OldIPAddress returns the old "ip_address" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldIPAddress(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPAddress is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPAddress: %w", err) + } + return oldValue.IPAddress, nil +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (m *BouncerMutation) ClearIPAddress() { + m.ip_address = nil + m.clearedFields[bouncer.FieldIPAddress] = struct{}{} +} + +// IPAddressCleared returns if the "ip_address" field was cleared in this mutation. +func (m *BouncerMutation) IPAddressCleared() bool { + _, ok := m.clearedFields[bouncer.FieldIPAddress] + return ok +} + +// ResetIPAddress resets all changes to the "ip_address" field. +func (m *BouncerMutation) ResetIPAddress() { + m.ip_address = nil + delete(m.clearedFields, bouncer.FieldIPAddress) +} + +// SetType sets the "type" field. +func (m *BouncerMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *BouncerMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ClearType clears the value of the "type" field. +func (m *BouncerMutation) ClearType() { + m._type = nil + m.clearedFields[bouncer.FieldType] = struct{}{} +} + +// TypeCleared returns if the "type" field was cleared in this mutation. 
+func (m *BouncerMutation) TypeCleared() bool { + _, ok := m.clearedFields[bouncer.FieldType] + return ok +} + +// ResetType resets all changes to the "type" field. +func (m *BouncerMutation) ResetType() { + m._type = nil + delete(m.clearedFields, bouncer.FieldType) +} + +// SetVersion sets the "version" field. +func (m *BouncerMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the value of the "version" field in the mutation. +func (m *BouncerMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old "version" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVersion: %w", err) + } + return oldValue.Version, nil +} + +// ClearVersion clears the value of the "version" field. +func (m *BouncerMutation) ClearVersion() { + m.version = nil + m.clearedFields[bouncer.FieldVersion] = struct{}{} +} + +// VersionCleared returns if the "version" field was cleared in this mutation. +func (m *BouncerMutation) VersionCleared() bool { + _, ok := m.clearedFields[bouncer.FieldVersion] + return ok +} + +// ResetVersion resets all changes to the "version" field. +func (m *BouncerMutation) ResetVersion() { + m.version = nil + delete(m.clearedFields, bouncer.FieldVersion) +} + +// SetUntil sets the "until" field. +func (m *BouncerMutation) SetUntil(t time.Time) { + m.until = &t +} + +// Until returns the value of the "until" field in the mutation. +func (m *BouncerMutation) Until() (r time.Time, exists bool) { + v := m.until + if v == nil { + return + } + return *v, true +} + +// OldUntil returns the old "until" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldUntil(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUntil: %w", err) + } + return oldValue.Until, nil +} + +// ClearUntil clears the value of the "until" field. +func (m *BouncerMutation) ClearUntil() { + m.until = nil + m.clearedFields[bouncer.FieldUntil] = struct{}{} +} + +// UntilCleared returns if the "until" field was cleared in this mutation. +func (m *BouncerMutation) UntilCleared() bool { + _, ok := m.clearedFields[bouncer.FieldUntil] + return ok +} + +// ResetUntil resets all changes to the "until" field. +func (m *BouncerMutation) ResetUntil() { + m.until = nil + delete(m.clearedFields, bouncer.FieldUntil) +} + +// SetLastPull sets the "last_pull" field. 
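+//
+// Editor's note (illustrative only): as with every generated getter below,
+// the second return value distinguishes "not staged" from a zero value:
+//
+//	m.SetLastPull(time.Now())
+//	if t, ok := m.LastPull(); ok {
+//		fmt.Println("staged last_pull:", t)
+//	}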
+func (m *BouncerMutation) SetLastPull(t time.Time) {
+	m.last_pull = &t
+}
+
+// LastPull returns the value of the "last_pull" field in the mutation.
+func (m *BouncerMutation) LastPull() (r time.Time, exists bool) {
+	v := m.last_pull
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLastPull returns the old "last_pull" field's value of the Bouncer entity.
+// If the Bouncer object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLastPull is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLastPull requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLastPull: %w", err)
+	}
+	return oldValue.LastPull, nil
+}
+
+// ResetLastPull resets all changes to the "last_pull" field.
+func (m *BouncerMutation) ResetLastPull() {
+	m.last_pull = nil
+}
+
+// SetAuthType sets the "auth_type" field.
+func (m *BouncerMutation) SetAuthType(s string) {
+	m.auth_type = &s
+}
+
+// AuthType returns the value of the "auth_type" field in the mutation.
+func (m *BouncerMutation) AuthType() (r string, exists bool) {
+	v := m.auth_type
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldAuthType returns the old "auth_type" field's value of the Bouncer entity.
+// If the Bouncer object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *BouncerMutation) OldAuthType(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldAuthType is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldAuthType requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldAuthType: %w", err)
+	}
+	return oldValue.AuthType, nil
+}
+
+// ResetAuthType resets all changes to the "auth_type" field.
+func (m *BouncerMutation) ResetAuthType() {
+	m.auth_type = nil
+}
+
+// Where appends a list of predicates to the BouncerMutation builder.
+func (m *BouncerMutation) Where(ps ...predicate.Bouncer) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *BouncerMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Bouncer).
+func (m *BouncerMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
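+//
+// Editor's sketch (illustrative only; hook wiring assumed): auditing every
+// staged change by combining Fields with the generic Field accessor:
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			fmt.Printf("%s -> %v\n", name, v)
+//		}
+//	}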
+func (m *BouncerMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, bouncer.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, bouncer.FieldUpdatedAt) + } + if m.name != nil { + fields = append(fields, bouncer.FieldName) + } + if m.api_key != nil { + fields = append(fields, bouncer.FieldAPIKey) + } + if m.revoked != nil { + fields = append(fields, bouncer.FieldRevoked) + } + if m.ip_address != nil { + fields = append(fields, bouncer.FieldIPAddress) + } + if m._type != nil { + fields = append(fields, bouncer.FieldType) + } + if m.version != nil { + fields = append(fields, bouncer.FieldVersion) + } + if m.until != nil { + fields = append(fields, bouncer.FieldUntil) + } + if m.last_pull != nil { + fields = append(fields, bouncer.FieldLastPull) + } + if m.auth_type != nil { + fields = append(fields, bouncer.FieldAuthType) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *BouncerMutation) Field(name string) (ent.Value, bool) { + switch name { + case bouncer.FieldCreatedAt: + return m.CreatedAt() + case bouncer.FieldUpdatedAt: + return m.UpdatedAt() + case bouncer.FieldName: + return m.Name() + case bouncer.FieldAPIKey: + return m.APIKey() + case bouncer.FieldRevoked: + return m.Revoked() + case bouncer.FieldIPAddress: + return m.IPAddress() + case bouncer.FieldType: + return m.GetType() + case bouncer.FieldVersion: + return m.Version() + case bouncer.FieldUntil: + return m.Until() + case bouncer.FieldLastPull: + return m.LastPull() + case bouncer.FieldAuthType: + return m.AuthType() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case bouncer.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case bouncer.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case bouncer.FieldName: + return m.OldName(ctx) + case bouncer.FieldAPIKey: + return m.OldAPIKey(ctx) + case bouncer.FieldRevoked: + return m.OldRevoked(ctx) + case bouncer.FieldIPAddress: + return m.OldIPAddress(ctx) + case bouncer.FieldType: + return m.OldType(ctx) + case bouncer.FieldVersion: + return m.OldVersion(ctx) + case bouncer.FieldUntil: + return m.OldUntil(ctx) + case bouncer.FieldLastPull: + return m.OldLastPull(ctx) + case bouncer.FieldAuthType: + return m.OldAuthType(ctx) + } + return nil, fmt.Errorf("unknown Bouncer field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
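+//
+// Editor's sketch (illustrative only): the value must already have the
+// field's exact Go type; the type switch below does no conversion:
+//
+//	_ = m.SetField(bouncer.FieldRevoked, true)      // ok: bool
+//	err := m.SetField(bouncer.FieldRevoked, "true") // unexpected type string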
+func (m *BouncerMutation) SetField(name string, value ent.Value) error { + switch name { + case bouncer.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case bouncer.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case bouncer.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case bouncer.FieldAPIKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKey(v) + return nil + case bouncer.FieldRevoked: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRevoked(v) + return nil + case bouncer.FieldIPAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPAddress(v) + return nil + case bouncer.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case bouncer.FieldVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVersion(v) + return nil + case bouncer.FieldUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUntil(v) + return nil + case bouncer.FieldLastPull: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastPull(v) + return nil + case bouncer.FieldAuthType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAuthType(v) + return nil + } + return fmt.Errorf("unknown Bouncer field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BouncerMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BouncerMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BouncerMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Bouncer numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
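+//
+// Editor's sketch (illustrative only): only nullable fields can be cleared,
+// and cleared names then show up here:
+//
+//	_ = m.ClearField(bouncer.FieldIPAddress) // nullable: stages a NULL write
+//	fmt.Println(m.ClearedFields())           // e.g. [ip_address]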
+func (m *BouncerMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(bouncer.FieldCreatedAt) { + fields = append(fields, bouncer.FieldCreatedAt) + } + if m.FieldCleared(bouncer.FieldUpdatedAt) { + fields = append(fields, bouncer.FieldUpdatedAt) + } + if m.FieldCleared(bouncer.FieldIPAddress) { + fields = append(fields, bouncer.FieldIPAddress) + } + if m.FieldCleared(bouncer.FieldType) { + fields = append(fields, bouncer.FieldType) + } + if m.FieldCleared(bouncer.FieldVersion) { + fields = append(fields, bouncer.FieldVersion) + } + if m.FieldCleared(bouncer.FieldUntil) { + fields = append(fields, bouncer.FieldUntil) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *BouncerMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *BouncerMutation) ClearField(name string) error { + switch name { + case bouncer.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case bouncer.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + case bouncer.FieldIPAddress: + m.ClearIPAddress() + return nil + case bouncer.FieldType: + m.ClearType() + return nil + case bouncer.FieldVersion: + m.ClearVersion() + return nil + case bouncer.FieldUntil: + m.ClearUntil() + return nil + } + return fmt.Errorf("unknown Bouncer nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *BouncerMutation) ResetField(name string) error { + switch name { + case bouncer.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case bouncer.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case bouncer.FieldName: + m.ResetName() + return nil + case bouncer.FieldAPIKey: + m.ResetAPIKey() + return nil + case bouncer.FieldRevoked: + m.ResetRevoked() + return nil + case bouncer.FieldIPAddress: + m.ResetIPAddress() + return nil + case bouncer.FieldType: + m.ResetType() + return nil + case bouncer.FieldVersion: + m.ResetVersion() + return nil + case bouncer.FieldUntil: + m.ResetUntil() + return nil + case bouncer.FieldLastPull: + m.ResetLastPull() + return nil + case bouncer.FieldAuthType: + m.ResetAuthType() + return nil + } + return fmt.Errorf("unknown Bouncer field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BouncerMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *BouncerMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *BouncerMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *BouncerMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BouncerMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
+func (m *BouncerMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *BouncerMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Bouncer unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *BouncerMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Bouncer edge %s", name) +} + +// DecisionMutation represents an operation that mutates the Decision nodes in the graph. +type DecisionMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + until *time.Time + scenario *string + _type *string + start_ip *int64 + addstart_ip *int64 + end_ip *int64 + addend_ip *int64 + start_suffix *int64 + addstart_suffix *int64 + end_suffix *int64 + addend_suffix *int64 + ip_size *int64 + addip_size *int64 + scope *string + value *string + origin *string + simulated *bool + clearedFields map[string]struct{} + owner *int + clearedowner bool + done bool + oldValue func(context.Context) (*Decision, error) + predicates []predicate.Decision +} + +var _ ent.Mutation = (*DecisionMutation)(nil) + +// decisionOption allows management of the mutation configuration using functional options. +type decisionOption func(*DecisionMutation) + +// newDecisionMutation creates new mutation for the Decision entity. +func newDecisionMutation(c config, op Op, opts ...decisionOption) *DecisionMutation { + m := &DecisionMutation{ + config: c, + op: op, + typ: TypeDecision, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDecisionID sets the ID field of the mutation. +func withDecisionID(id int) decisionOption { + return func(m *DecisionMutation) { + var ( + err error + once sync.Once + value *Decision + ) + m.oldValue = func(ctx context.Context) (*Decision, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Decision.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDecision sets the old Decision of the mutation. +func withDecision(node *Decision) decisionOption { + return func(m *DecisionMutation) { + m.oldValue = func(context.Context) (*Decision, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DecisionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DecisionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DecisionMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DecisionMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Decision.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *DecisionMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *DecisionMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Decision entity.
+// If the Decision object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (m *DecisionMutation) ClearCreatedAt() {
+	m.created_at = nil
+	m.clearedFields[decision.FieldCreatedAt] = struct{}{}
+}
+
+// CreatedAtCleared returns if the "created_at" field was cleared in this mutation.
+func (m *DecisionMutation) CreatedAtCleared() bool {
+	_, ok := m.clearedFields[decision.FieldCreatedAt]
+	return ok
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *DecisionMutation) ResetCreatedAt() {
+	m.created_at = nil
+	delete(m.clearedFields, decision.FieldCreatedAt)
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *DecisionMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *DecisionMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Decision entity.
+// If the Decision object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
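+//
+// Editor's note (illustrative only): Clear stages a NULL write, whereas the
+// matching Reset merely forgets any staged change:
+//
+//	m.ClearUpdatedAt()                // update will write NULL
+//	fmt.Println(m.UpdatedAtCleared()) // true
+//	m.ResetUpdatedAt()                // updated_at is left untouched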
+func (m *DecisionMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[decision.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. +func (m *DecisionMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[decision.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *DecisionMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, decision.FieldUpdatedAt) +} + +// SetUntil sets the "until" field. +func (m *DecisionMutation) SetUntil(t time.Time) { + m.until = &t +} + +// Until returns the value of the "until" field in the mutation. +func (m *DecisionMutation) Until() (r time.Time, exists bool) { + v := m.until + if v == nil { + return + } + return *v, true +} + +// OldUntil returns the old "until" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUntil: %w", err) + } + return oldValue.Until, nil +} + +// ClearUntil clears the value of the "until" field. +func (m *DecisionMutation) ClearUntil() { + m.until = nil + m.clearedFields[decision.FieldUntil] = struct{}{} +} + +// UntilCleared returns if the "until" field was cleared in this mutation. +func (m *DecisionMutation) UntilCleared() bool { + _, ok := m.clearedFields[decision.FieldUntil] + return ok +} + +// ResetUntil resets all changes to the "until" field. +func (m *DecisionMutation) ResetUntil() { + m.until = nil + delete(m.clearedFields, decision.FieldUntil) +} + +// SetScenario sets the "scenario" field. +func (m *DecisionMutation) SetScenario(s string) { + m.scenario = &s +} + +// Scenario returns the value of the "scenario" field in the mutation. +func (m *DecisionMutation) Scenario() (r string, exists bool) { + v := m.scenario + if v == nil { + return + } + return *v, true +} + +// OldScenario returns the old "scenario" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldScenario(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScenario is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScenario requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenario: %w", err) + } + return oldValue.Scenario, nil +} + +// ResetScenario resets all changes to the "scenario" field. +func (m *DecisionMutation) ResetScenario() { + m.scenario = nil +} + +// SetType sets the "type" field. +func (m *DecisionMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. 
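+//
+// Editor's note: the getter is named GetType (and the struct field _type)
+// because Type() is already taken by the ent.Mutation interface, where it
+// reports the node type:
+//
+//	fmt.Println(m.Type()) // "Decision", the node type
+//	v, ok := m.GetType()  // the staged "type" column value, if any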
+func (m *DecisionMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *DecisionMutation) ResetType() { + m._type = nil +} + +// SetStartIP sets the "start_ip" field. +func (m *DecisionMutation) SetStartIP(i int64) { + m.start_ip = &i + m.addstart_ip = nil +} + +// StartIP returns the value of the "start_ip" field in the mutation. +func (m *DecisionMutation) StartIP() (r int64, exists bool) { + v := m.start_ip + if v == nil { + return + } + return *v, true +} + +// OldStartIP returns the old "start_ip" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldStartIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartIP is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartIP: %w", err) + } + return oldValue.StartIP, nil +} + +// AddStartIP adds i to the "start_ip" field. +func (m *DecisionMutation) AddStartIP(i int64) { + if m.addstart_ip != nil { + *m.addstart_ip += i + } else { + m.addstart_ip = &i + } +} + +// AddedStartIP returns the value that was added to the "start_ip" field in this mutation. +func (m *DecisionMutation) AddedStartIP() (r int64, exists bool) { + v := m.addstart_ip + if v == nil { + return + } + return *v, true +} + +// ClearStartIP clears the value of the "start_ip" field. +func (m *DecisionMutation) ClearStartIP() { + m.start_ip = nil + m.addstart_ip = nil + m.clearedFields[decision.FieldStartIP] = struct{}{} +} + +// StartIPCleared returns if the "start_ip" field was cleared in this mutation. +func (m *DecisionMutation) StartIPCleared() bool { + _, ok := m.clearedFields[decision.FieldStartIP] + return ok +} + +// ResetStartIP resets all changes to the "start_ip" field. +func (m *DecisionMutation) ResetStartIP() { + m.start_ip = nil + m.addstart_ip = nil + delete(m.clearedFields, decision.FieldStartIP) +} + +// SetEndIP sets the "end_ip" field. +func (m *DecisionMutation) SetEndIP(i int64) { + m.end_ip = &i + m.addend_ip = nil +} + +// EndIP returns the value of the "end_ip" field in the mutation. +func (m *DecisionMutation) EndIP() (r int64, exists bool) { + v := m.end_ip + if v == nil { + return + } + return *v, true +} + +// OldEndIP returns the old "end_ip" field's value of the Decision entity. 
+// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldEndIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndIP is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndIP: %w", err) + } + return oldValue.EndIP, nil +} + +// AddEndIP adds i to the "end_ip" field. +func (m *DecisionMutation) AddEndIP(i int64) { + if m.addend_ip != nil { + *m.addend_ip += i + } else { + m.addend_ip = &i + } +} + +// AddedEndIP returns the value that was added to the "end_ip" field in this mutation. +func (m *DecisionMutation) AddedEndIP() (r int64, exists bool) { + v := m.addend_ip + if v == nil { + return + } + return *v, true +} + +// ClearEndIP clears the value of the "end_ip" field. +func (m *DecisionMutation) ClearEndIP() { + m.end_ip = nil + m.addend_ip = nil + m.clearedFields[decision.FieldEndIP] = struct{}{} +} + +// EndIPCleared returns if the "end_ip" field was cleared in this mutation. +func (m *DecisionMutation) EndIPCleared() bool { + _, ok := m.clearedFields[decision.FieldEndIP] + return ok +} + +// ResetEndIP resets all changes to the "end_ip" field. +func (m *DecisionMutation) ResetEndIP() { + m.end_ip = nil + m.addend_ip = nil + delete(m.clearedFields, decision.FieldEndIP) +} + +// SetStartSuffix sets the "start_suffix" field. +func (m *DecisionMutation) SetStartSuffix(i int64) { + m.start_suffix = &i + m.addstart_suffix = nil +} + +// StartSuffix returns the value of the "start_suffix" field in the mutation. +func (m *DecisionMutation) StartSuffix() (r int64, exists bool) { + v := m.start_suffix + if v == nil { + return + } + return *v, true +} + +// OldStartSuffix returns the old "start_suffix" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldStartSuffix(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartSuffix: %w", err) + } + return oldValue.StartSuffix, nil +} + +// AddStartSuffix adds i to the "start_suffix" field. +func (m *DecisionMutation) AddStartSuffix(i int64) { + if m.addstart_suffix != nil { + *m.addstart_suffix += i + } else { + m.addstart_suffix = &i + } +} + +// AddedStartSuffix returns the value that was added to the "start_suffix" field in this mutation. +func (m *DecisionMutation) AddedStartSuffix() (r int64, exists bool) { + v := m.addstart_suffix + if v == nil { + return + } + return *v, true +} + +// ClearStartSuffix clears the value of the "start_suffix" field. 
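+//
+// Editor's note (illustrative only): Set replaces any pending increment,
+// Add accumulates one, and Clear below drops both value and delta:
+//
+//	m.SetStartSuffix(8)  // staged value 8, pending delta dropped
+//	m.AddStartSuffix(4)  // the update will write 8 + 4
+//	m.ClearStartSuffix() // stages NULL; value and delta both gone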
+func (m *DecisionMutation) ClearStartSuffix() { + m.start_suffix = nil + m.addstart_suffix = nil + m.clearedFields[decision.FieldStartSuffix] = struct{}{} +} + +// StartSuffixCleared returns if the "start_suffix" field was cleared in this mutation. +func (m *DecisionMutation) StartSuffixCleared() bool { + _, ok := m.clearedFields[decision.FieldStartSuffix] + return ok +} + +// ResetStartSuffix resets all changes to the "start_suffix" field. +func (m *DecisionMutation) ResetStartSuffix() { + m.start_suffix = nil + m.addstart_suffix = nil + delete(m.clearedFields, decision.FieldStartSuffix) +} + +// SetEndSuffix sets the "end_suffix" field. +func (m *DecisionMutation) SetEndSuffix(i int64) { + m.end_suffix = &i + m.addend_suffix = nil +} + +// EndSuffix returns the value of the "end_suffix" field in the mutation. +func (m *DecisionMutation) EndSuffix() (r int64, exists bool) { + v := m.end_suffix + if v == nil { + return + } + return *v, true +} + +// OldEndSuffix returns the old "end_suffix" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldEndSuffix(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndSuffix: %w", err) + } + return oldValue.EndSuffix, nil +} + +// AddEndSuffix adds i to the "end_suffix" field. +func (m *DecisionMutation) AddEndSuffix(i int64) { + if m.addend_suffix != nil { + *m.addend_suffix += i + } else { + m.addend_suffix = &i + } +} + +// AddedEndSuffix returns the value that was added to the "end_suffix" field in this mutation. +func (m *DecisionMutation) AddedEndSuffix() (r int64, exists bool) { + v := m.addend_suffix + if v == nil { + return + } + return *v, true +} + +// ClearEndSuffix clears the value of the "end_suffix" field. +func (m *DecisionMutation) ClearEndSuffix() { + m.end_suffix = nil + m.addend_suffix = nil + m.clearedFields[decision.FieldEndSuffix] = struct{}{} +} + +// EndSuffixCleared returns if the "end_suffix" field was cleared in this mutation. +func (m *DecisionMutation) EndSuffixCleared() bool { + _, ok := m.clearedFields[decision.FieldEndSuffix] + return ok +} + +// ResetEndSuffix resets all changes to the "end_suffix" field. +func (m *DecisionMutation) ResetEndSuffix() { + m.end_suffix = nil + m.addend_suffix = nil + delete(m.clearedFields, decision.FieldEndSuffix) +} + +// SetIPSize sets the "ip_size" field. +func (m *DecisionMutation) SetIPSize(i int64) { + m.ip_size = &i + m.addip_size = nil +} + +// IPSize returns the value of the "ip_size" field in the mutation. +func (m *DecisionMutation) IPSize() (r int64, exists bool) { + v := m.ip_size + if v == nil { + return + } + return *v, true +} + +// OldIPSize returns the old "ip_size" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DecisionMutation) OldIPSize(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPSize is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPSize requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPSize: %w", err) + } + return oldValue.IPSize, nil +} + +// AddIPSize adds i to the "ip_size" field. +func (m *DecisionMutation) AddIPSize(i int64) { + if m.addip_size != nil { + *m.addip_size += i + } else { + m.addip_size = &i + } +} + +// AddedIPSize returns the value that was added to the "ip_size" field in this mutation. +func (m *DecisionMutation) AddedIPSize() (r int64, exists bool) { + v := m.addip_size + if v == nil { + return + } + return *v, true +} + +// ClearIPSize clears the value of the "ip_size" field. +func (m *DecisionMutation) ClearIPSize() { + m.ip_size = nil + m.addip_size = nil + m.clearedFields[decision.FieldIPSize] = struct{}{} +} + +// IPSizeCleared returns if the "ip_size" field was cleared in this mutation. +func (m *DecisionMutation) IPSizeCleared() bool { + _, ok := m.clearedFields[decision.FieldIPSize] + return ok +} + +// ResetIPSize resets all changes to the "ip_size" field. +func (m *DecisionMutation) ResetIPSize() { + m.ip_size = nil + m.addip_size = nil + delete(m.clearedFields, decision.FieldIPSize) +} + +// SetScope sets the "scope" field. +func (m *DecisionMutation) SetScope(s string) { + m.scope = &s +} + +// Scope returns the value of the "scope" field in the mutation. +func (m *DecisionMutation) Scope() (r string, exists bool) { + v := m.scope + if v == nil { + return + } + return *v, true +} + +// OldScope returns the old "scope" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScope is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScope: %w", err) + } + return oldValue.Scope, nil +} + +// ResetScope resets all changes to the "scope" field. +func (m *DecisionMutation) ResetScope() { + m.scope = nil +} + +// SetValue sets the "value" field. +func (m *DecisionMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *DecisionMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DecisionMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *DecisionMutation) ResetValue() { + m.value = nil +} + +// SetOrigin sets the "origin" field. +func (m *DecisionMutation) SetOrigin(s string) { + m.origin = &s +} + +// Origin returns the value of the "origin" field in the mutation. +func (m *DecisionMutation) Origin() (r string, exists bool) { + v := m.origin + if v == nil { + return + } + return *v, true +} + +// OldOrigin returns the old "origin" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldOrigin(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOrigin is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOrigin requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOrigin: %w", err) + } + return oldValue.Origin, nil +} + +// ResetOrigin resets all changes to the "origin" field. +func (m *DecisionMutation) ResetOrigin() { + m.origin = nil +} + +// SetSimulated sets the "simulated" field. +func (m *DecisionMutation) SetSimulated(b bool) { + m.simulated = &b +} + +// Simulated returns the value of the "simulated" field in the mutation. +func (m *DecisionMutation) Simulated() (r bool, exists bool) { + v := m.simulated + if v == nil { + return + } + return *v, true +} + +// OldSimulated returns the old "simulated" field's value of the Decision entity. +// If the Decision object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DecisionMutation) OldSimulated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSimulated is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSimulated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSimulated: %w", err) + } + return oldValue.Simulated, nil +} + +// ResetSimulated resets all changes to the "simulated" field. +func (m *DecisionMutation) ResetSimulated() { + m.simulated = nil +} + +// SetOwnerID sets the "owner" edge to the Alert entity by id. +func (m *DecisionMutation) SetOwnerID(id int) { + m.owner = &id +} + +// ClearOwner clears the "owner" edge to the Alert entity. +func (m *DecisionMutation) ClearOwner() { + m.clearedowner = true +} + +// OwnerCleared reports if the "owner" edge to the Alert entity was cleared. +func (m *DecisionMutation) OwnerCleared() bool { + return m.clearedowner +} + +// OwnerID returns the "owner" edge ID in the mutation. 
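+//
+// Editor's sketch (illustrative only): "owner" is a unique edge, so at most
+// one ID can be staged at a time:
+//
+//	m.SetOwnerID(42)
+//	if id, ok := m.OwnerID(); ok {
+//		fmt.Println("decision will belong to alert", id)
+//	}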
+func (m *DecisionMutation) OwnerID() (id int, exists bool) {
+	if m.owner != nil {
+		return *m.owner, true
+	}
+	return
+}
+
+// OwnerIDs returns the "owner" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// OwnerID instead. It exists only for internal usage by the builders.
+func (m *DecisionMutation) OwnerIDs() (ids []int) {
+	if id := m.owner; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetOwner resets all changes to the "owner" edge.
+func (m *DecisionMutation) ResetOwner() {
+	m.owner = nil
+	m.clearedowner = false
+}
+
+// Where appends a list of predicates to the DecisionMutation builder.
+func (m *DecisionMutation) Where(ps ...predicate.Decision) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *DecisionMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Decision).
+func (m *DecisionMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *DecisionMutation) Fields() []string {
+	fields := make([]string, 0, 14)
+	if m.created_at != nil {
+		fields = append(fields, decision.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, decision.FieldUpdatedAt)
+	}
+	if m.until != nil {
+		fields = append(fields, decision.FieldUntil)
+	}
+	if m.scenario != nil {
+		fields = append(fields, decision.FieldScenario)
+	}
+	if m._type != nil {
+		fields = append(fields, decision.FieldType)
+	}
+	if m.start_ip != nil {
+		fields = append(fields, decision.FieldStartIP)
+	}
+	if m.end_ip != nil {
+		fields = append(fields, decision.FieldEndIP)
+	}
+	if m.start_suffix != nil {
+		fields = append(fields, decision.FieldStartSuffix)
+	}
+	if m.end_suffix != nil {
+		fields = append(fields, decision.FieldEndSuffix)
+	}
+	if m.ip_size != nil {
+		fields = append(fields, decision.FieldIPSize)
+	}
+	if m.scope != nil {
+		fields = append(fields, decision.FieldScope)
+	}
+	if m.value != nil {
+		fields = append(fields, decision.FieldValue)
+	}
+	if m.origin != nil {
+		fields = append(fields, decision.FieldOrigin)
+	}
+	if m.simulated != nil {
+		fields = append(fields, decision.FieldSimulated)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *DecisionMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case decision.FieldCreatedAt:
+		return m.CreatedAt()
+	case decision.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case decision.FieldUntil:
+		return m.Until()
+	case decision.FieldScenario:
+		return m.Scenario()
+	case decision.FieldType:
+		return m.GetType()
+	case decision.FieldStartIP:
+		return m.StartIP()
+	case decision.FieldEndIP:
+		return m.EndIP()
+	case decision.FieldStartSuffix:
+		return m.StartSuffix()
+	case decision.FieldEndSuffix:
+		return m.EndSuffix()
+	case decision.FieldIPSize:
+		return m.IPSize()
+	case decision.FieldScope:
+		return m.Scope()
+	case decision.FieldValue:
+		return m.Value()
+	case decision.FieldOrigin:
+		return m.Origin()
+	case decision.FieldSimulated:
+		return m.Simulated()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database.
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DecisionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case decision.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case decision.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case decision.FieldUntil: + return m.OldUntil(ctx) + case decision.FieldScenario: + return m.OldScenario(ctx) + case decision.FieldType: + return m.OldType(ctx) + case decision.FieldStartIP: + return m.OldStartIP(ctx) + case decision.FieldEndIP: + return m.OldEndIP(ctx) + case decision.FieldStartSuffix: + return m.OldStartSuffix(ctx) + case decision.FieldEndSuffix: + return m.OldEndSuffix(ctx) + case decision.FieldIPSize: + return m.OldIPSize(ctx) + case decision.FieldScope: + return m.OldScope(ctx) + case decision.FieldValue: + return m.OldValue(ctx) + case decision.FieldOrigin: + return m.OldOrigin(ctx) + case decision.FieldSimulated: + return m.OldSimulated(ctx) + } + return nil, fmt.Errorf("unknown Decision field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DecisionMutation) SetField(name string, value ent.Value) error { + switch name { + case decision.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case decision.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case decision.FieldUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUntil(v) + return nil + case decision.FieldScenario: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenario(v) + return nil + case decision.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case decision.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartIP(v) + return nil + case decision.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndIP(v) + return nil + case decision.FieldStartSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartSuffix(v) + return nil + case decision.FieldEndSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndSuffix(v) + return nil + case decision.FieldIPSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPSize(v) + return nil + case decision.FieldScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScope(v) + return nil + case decision.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case decision.FieldOrigin: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOrigin(v) + return 
nil + case decision.FieldSimulated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSimulated(v) + return nil + } + return fmt.Errorf("unknown Decision field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DecisionMutation) AddedFields() []string { + var fields []string + if m.addstart_ip != nil { + fields = append(fields, decision.FieldStartIP) + } + if m.addend_ip != nil { + fields = append(fields, decision.FieldEndIP) + } + if m.addstart_suffix != nil { + fields = append(fields, decision.FieldStartSuffix) + } + if m.addend_suffix != nil { + fields = append(fields, decision.FieldEndSuffix) + } + if m.addip_size != nil { + fields = append(fields, decision.FieldIPSize) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DecisionMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case decision.FieldStartIP: + return m.AddedStartIP() + case decision.FieldEndIP: + return m.AddedEndIP() + case decision.FieldStartSuffix: + return m.AddedStartSuffix() + case decision.FieldEndSuffix: + return m.AddedEndSuffix() + case decision.FieldIPSize: + return m.AddedIPSize() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DecisionMutation) AddField(name string, value ent.Value) error { + switch name { + case decision.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartIP(v) + return nil + case decision.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndIP(v) + return nil + case decision.FieldStartSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartSuffix(v) + return nil + case decision.FieldEndSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndSuffix(v) + return nil + case decision.FieldIPSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddIPSize(v) + return nil + } + return fmt.Errorf("unknown Decision numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
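+// A minimal usage sketch (assuming the caller just logs the cleared names):
+//
+//	for _, f := range m.ClearedFields() {
+//		log.Printf("decision field %q was cleared", f)
+//	}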
+func (m *DecisionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(decision.FieldCreatedAt) { + fields = append(fields, decision.FieldCreatedAt) + } + if m.FieldCleared(decision.FieldUpdatedAt) { + fields = append(fields, decision.FieldUpdatedAt) + } + if m.FieldCleared(decision.FieldUntil) { + fields = append(fields, decision.FieldUntil) + } + if m.FieldCleared(decision.FieldStartIP) { + fields = append(fields, decision.FieldStartIP) + } + if m.FieldCleared(decision.FieldEndIP) { + fields = append(fields, decision.FieldEndIP) + } + if m.FieldCleared(decision.FieldStartSuffix) { + fields = append(fields, decision.FieldStartSuffix) + } + if m.FieldCleared(decision.FieldEndSuffix) { + fields = append(fields, decision.FieldEndSuffix) + } + if m.FieldCleared(decision.FieldIPSize) { + fields = append(fields, decision.FieldIPSize) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DecisionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DecisionMutation) ClearField(name string) error { + switch name { + case decision.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case decision.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + case decision.FieldUntil: + m.ClearUntil() + return nil + case decision.FieldStartIP: + m.ClearStartIP() + return nil + case decision.FieldEndIP: + m.ClearEndIP() + return nil + case decision.FieldStartSuffix: + m.ClearStartSuffix() + return nil + case decision.FieldEndSuffix: + m.ClearEndSuffix() + return nil + case decision.FieldIPSize: + m.ClearIPSize() + return nil + } + return fmt.Errorf("unknown Decision nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DecisionMutation) ResetField(name string) error { + switch name { + case decision.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case decision.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case decision.FieldUntil: + m.ResetUntil() + return nil + case decision.FieldScenario: + m.ResetScenario() + return nil + case decision.FieldType: + m.ResetType() + return nil + case decision.FieldStartIP: + m.ResetStartIP() + return nil + case decision.FieldEndIP: + m.ResetEndIP() + return nil + case decision.FieldStartSuffix: + m.ResetStartSuffix() + return nil + case decision.FieldEndSuffix: + m.ResetEndSuffix() + return nil + case decision.FieldIPSize: + m.ResetIPSize() + return nil + case decision.FieldScope: + m.ResetScope() + return nil + case decision.FieldValue: + m.ResetValue() + return nil + case decision.FieldOrigin: + m.ResetOrigin() + return nil + case decision.FieldSimulated: + m.ResetSimulated() + return nil + } + return fmt.Errorf("unknown Decision field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DecisionMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, decision.EdgeOwner) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *DecisionMutation) AddedIDs(name string) []ent.Value { + switch name { + case decision.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DecisionMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DecisionMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DecisionMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedowner { + edges = append(edges, decision.EdgeOwner) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DecisionMutation) EdgeCleared(name string) bool { + switch name { + case decision.EdgeOwner: + return m.clearedowner + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DecisionMutation) ClearEdge(name string) error { + switch name { + case decision.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Decision unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DecisionMutation) ResetEdge(name string) error { + switch name { + case decision.EdgeOwner: + m.ResetOwner() + return nil + } + return fmt.Errorf("unknown Decision edge %s", name) +} + +// EventMutation represents an operation that mutates the Event nodes in the graph. +type EventMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + time *time.Time + serialized *string + clearedFields map[string]struct{} + owner *int + clearedowner bool + done bool + oldValue func(context.Context) (*Event, error) + predicates []predicate.Event +} + +var _ ent.Mutation = (*EventMutation)(nil) + +// eventOption allows management of the mutation configuration using functional options. +type eventOption func(*EventMutation) + +// newEventMutation creates new mutation for the Event entity. +func newEventMutation(c config, op Op, opts ...eventOption) *EventMutation { + m := &EventMutation{ + config: c, + op: op, + typ: TypeEvent, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withEventID sets the ID field of the mutation. +func withEventID(id int) eventOption { + return func(m *EventMutation) { + var ( + err error + once sync.Once + value *Event + ) + m.oldValue = func(ctx context.Context) (*Event, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Event.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withEvent sets the old Event of the mutation. +func withEvent(node *Event) eventOption { + return func(m *EventMutation) { + m.oldValue = func(context.Context) (*Event, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
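+// As a sketch (assuming ctx comes from the surrounding hook), this enables
+// follow-up queries from inside a mutation:
+//
+//	count, err := m.Client().Event.Query().Count(ctx)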
+func (m EventMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m EventMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *EventMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *EventMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Event.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *EventMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *EventMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Event entity.
+// If the Event object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (m *EventMutation) ClearCreatedAt() {
+	m.created_at = nil
+	m.clearedFields[event.FieldCreatedAt] = struct{}{}
+}
+
+// CreatedAtCleared returns if the "created_at" field was cleared in this mutation.
+func (m *EventMutation) CreatedAtCleared() bool {
+	_, ok := m.clearedFields[event.FieldCreatedAt]
+	return ok
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *EventMutation) ResetCreatedAt() {
+	m.created_at = nil
+	delete(m.clearedFields, event.FieldCreatedAt)
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *EventMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *EventMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Event entity.
+// If the Event object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *EventMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[event.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. +func (m *EventMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[event.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *EventMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, event.FieldUpdatedAt) +} + +// SetTime sets the "time" field. +func (m *EventMutation) SetTime(t time.Time) { + m.time = &t +} + +// Time returns the value of the "time" field in the mutation. +func (m *EventMutation) Time() (r time.Time, exists bool) { + v := m.time + if v == nil { + return + } + return *v, true +} + +// OldTime returns the old "time" field's value of the Event entity. +// If the Event object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventMutation) OldTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTime: %w", err) + } + return oldValue.Time, nil +} + +// ResetTime resets all changes to the "time" field. +func (m *EventMutation) ResetTime() { + m.time = nil +} + +// SetSerialized sets the "serialized" field. +func (m *EventMutation) SetSerialized(s string) { + m.serialized = &s +} + +// Serialized returns the value of the "serialized" field in the mutation. +func (m *EventMutation) Serialized() (r string, exists bool) { + v := m.serialized + if v == nil { + return + } + return *v, true +} + +// OldSerialized returns the old "serialized" field's value of the Event entity. +// If the Event object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EventMutation) OldSerialized(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSerialized is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSerialized requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSerialized: %w", err) + } + return oldValue.Serialized, nil +} + +// ResetSerialized resets all changes to the "serialized" field. 
+func (m *EventMutation) ResetSerialized() {
+	m.serialized = nil
+}
+
+// SetOwnerID sets the "owner" edge to the Alert entity by id.
+func (m *EventMutation) SetOwnerID(id int) {
+	m.owner = &id
+}
+
+// ClearOwner clears the "owner" edge to the Alert entity.
+func (m *EventMutation) ClearOwner() {
+	m.clearedowner = true
+}
+
+// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
+func (m *EventMutation) OwnerCleared() bool {
+	return m.clearedowner
+}
+
+// OwnerID returns the "owner" edge ID in the mutation.
+func (m *EventMutation) OwnerID() (id int, exists bool) {
+	if m.owner != nil {
+		return *m.owner, true
+	}
+	return
+}
+
+// OwnerIDs returns the "owner" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// OwnerID instead. It exists only for internal usage by the builders.
+func (m *EventMutation) OwnerIDs() (ids []int) {
+	if id := m.owner; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetOwner resets all changes to the "owner" edge.
+func (m *EventMutation) ResetOwner() {
+	m.owner = nil
+	m.clearedowner = false
+}
+
+// Where appends a list of predicates to the EventMutation builder.
+func (m *EventMutation) Where(ps ...predicate.Event) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *EventMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Event).
+func (m *EventMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *EventMutation) Fields() []string {
+	fields := make([]string, 0, 4)
+	if m.created_at != nil {
+		fields = append(fields, event.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, event.FieldUpdatedAt)
+	}
+	if m.time != nil {
+		fields = append(fields, event.FieldTime)
+	}
+	if m.serialized != nil {
+		fields = append(fields, event.FieldSerialized)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *EventMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case event.FieldCreatedAt:
+		return m.CreatedAt()
+	case event.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case event.FieldTime:
+		return m.Time()
+	case event.FieldSerialized:
+		return m.Serialized()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *EventMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case event.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case event.FieldUpdatedAt:
+		return m.OldUpdatedAt(ctx)
+	case event.FieldTime:
+		return m.OldTime(ctx)
+	case event.FieldSerialized:
+		return m.OldSerialized(ctx)
+	}
+	return nil, fmt.Errorf("unknown Event field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
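+// A minimal sketch of the generic path (the empty JSON value is illustrative):
+//
+//	if err := m.SetField(event.FieldSerialized, "{}"); err != nil {
+//		// a non-string value or an unknown field name lands here
+//	}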
+func (m *EventMutation) SetField(name string, value ent.Value) error { + switch name { + case event.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case event.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case event.FieldTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTime(v) + return nil + case event.FieldSerialized: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSerialized(v) + return nil + } + return fmt.Errorf("unknown Event field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *EventMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *EventMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *EventMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Event numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *EventMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(event.FieldCreatedAt) { + fields = append(fields, event.FieldCreatedAt) + } + if m.FieldCleared(event.FieldUpdatedAt) { + fields = append(fields, event.FieldUpdatedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *EventMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *EventMutation) ClearField(name string) error { + switch name { + case event.FieldCreatedAt: + m.ClearCreatedAt() + return nil + case event.FieldUpdatedAt: + m.ClearUpdatedAt() + return nil + } + return fmt.Errorf("unknown Event nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *EventMutation) ResetField(name string) error { + switch name { + case event.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case event.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case event.FieldTime: + m.ResetTime() + return nil + case event.FieldSerialized: + m.ResetSerialized() + return nil + } + return fmt.Errorf("unknown Event field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *EventMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, event.EdgeOwner) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *EventMutation) AddedIDs(name string) []ent.Value { + switch name { + case event.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *EventMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *EventMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *EventMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedowner { + edges = append(edges, event.EdgeOwner) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *EventMutation) EdgeCleared(name string) bool { + switch name { + case event.EdgeOwner: + return m.clearedowner + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *EventMutation) ClearEdge(name string) error { + switch name { + case event.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Event unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *EventMutation) ResetEdge(name string) error { + switch name { + case event.EdgeOwner: + m.ResetOwner() + return nil + } + return fmt.Errorf("unknown Event edge %s", name) +} + +// MachineMutation represents an operation that mutates the Machine nodes in the graph. +type MachineMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + last_push *time.Time + last_heartbeat *time.Time + machineId *string + password *string + ipAddress *string + scenarios *string + version *string + isValidated *bool + status *string + auth_type *string + clearedFields map[string]struct{} + alerts map[int]struct{} + removedalerts map[int]struct{} + clearedalerts bool + done bool + oldValue func(context.Context) (*Machine, error) + predicates []predicate.Machine +} + +var _ ent.Mutation = (*MachineMutation)(nil) + +// machineOption allows management of the mutation configuration using functional options. +type machineOption func(*MachineMutation) + +// newMachineMutation creates new mutation for the Machine entity. +func newMachineMutation(c config, op Op, opts ...machineOption) *MachineMutation { + m := &MachineMutation{ + config: c, + op: op, + typ: TypeMachine, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withMachineID sets the ID field of the mutation. +func withMachineID(id int) machineOption { + return func(m *MachineMutation) { + var ( + err error + once sync.Once + value *Machine + ) + m.oldValue = func(ctx context.Context) (*Machine, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Machine.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withMachine sets the old Machine of the mutation. 
+func withMachine(node *Machine) machineOption {
+	return func(m *MachineMutation) {
+		m.oldValue = func(context.Context) (*Machine, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MachineMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MachineMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *MachineMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *MachineMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Machine.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *MachineMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *MachineMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Machine entity.
+// If the Machine object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (m *MachineMutation) ClearCreatedAt() {
+	m.created_at = nil
+	m.clearedFields[machine.FieldCreatedAt] = struct{}{}
+}
+
+// CreatedAtCleared returns if the "created_at" field was cleared in this mutation.
+func (m *MachineMutation) CreatedAtCleared() bool {
+	_, ok := m.clearedFields[machine.FieldCreatedAt]
+	return ok
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *MachineMutation) ResetCreatedAt() {
+	m.created_at = nil
+	delete(m.clearedFields, machine.FieldCreatedAt)
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *MachineMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *MachineMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ClearUpdatedAt clears the value of the "updated_at" field. +func (m *MachineMutation) ClearUpdatedAt() { + m.updated_at = nil + m.clearedFields[machine.FieldUpdatedAt] = struct{}{} +} + +// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. +func (m *MachineMutation) UpdatedAtCleared() bool { + _, ok := m.clearedFields[machine.FieldUpdatedAt] + return ok +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *MachineMutation) ResetUpdatedAt() { + m.updated_at = nil + delete(m.clearedFields, machine.FieldUpdatedAt) +} + +// SetLastPush sets the "last_push" field. +func (m *MachineMutation) SetLastPush(t time.Time) { + m.last_push = &t +} + +// LastPush returns the value of the "last_push" field in the mutation. +func (m *MachineMutation) LastPush() (r time.Time, exists bool) { + v := m.last_push + if v == nil { + return + } + return *v, true +} + +// OldLastPush returns the old "last_push" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldLastPush(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastPush is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastPush requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastPush: %w", err) + } + return oldValue.LastPush, nil +} + +// ClearLastPush clears the value of the "last_push" field. +func (m *MachineMutation) ClearLastPush() { + m.last_push = nil + m.clearedFields[machine.FieldLastPush] = struct{}{} +} + +// LastPushCleared returns if the "last_push" field was cleared in this mutation. +func (m *MachineMutation) LastPushCleared() bool { + _, ok := m.clearedFields[machine.FieldLastPush] + return ok +} + +// ResetLastPush resets all changes to the "last_push" field. +func (m *MachineMutation) ResetLastPush() { + m.last_push = nil + delete(m.clearedFields, machine.FieldLastPush) +} + +// SetLastHeartbeat sets the "last_heartbeat" field. +func (m *MachineMutation) SetLastHeartbeat(t time.Time) { + m.last_heartbeat = &t +} + +// LastHeartbeat returns the value of the "last_heartbeat" field in the mutation. 
+func (m *MachineMutation) LastHeartbeat() (r time.Time, exists bool) { + v := m.last_heartbeat + if v == nil { + return + } + return *v, true +} + +// OldLastHeartbeat returns the old "last_heartbeat" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldLastHeartbeat(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastHeartbeat is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastHeartbeat requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastHeartbeat: %w", err) + } + return oldValue.LastHeartbeat, nil +} + +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (m *MachineMutation) ClearLastHeartbeat() { + m.last_heartbeat = nil + m.clearedFields[machine.FieldLastHeartbeat] = struct{}{} +} + +// LastHeartbeatCleared returns if the "last_heartbeat" field was cleared in this mutation. +func (m *MachineMutation) LastHeartbeatCleared() bool { + _, ok := m.clearedFields[machine.FieldLastHeartbeat] + return ok +} + +// ResetLastHeartbeat resets all changes to the "last_heartbeat" field. +func (m *MachineMutation) ResetLastHeartbeat() { + m.last_heartbeat = nil + delete(m.clearedFields, machine.FieldLastHeartbeat) +} + +// SetMachineId sets the "machineId" field. +func (m *MachineMutation) SetMachineId(s string) { + m.machineId = &s +} + +// MachineId returns the value of the "machineId" field in the mutation. +func (m *MachineMutation) MachineId() (r string, exists bool) { + v := m.machineId + if v == nil { + return + } + return *v, true +} + +// OldMachineId returns the old "machineId" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldMachineId(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMachineId is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMachineId requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMachineId: %w", err) + } + return oldValue.MachineId, nil +} + +// ResetMachineId resets all changes to the "machineId" field. +func (m *MachineMutation) ResetMachineId() { + m.machineId = nil +} + +// SetPassword sets the "password" field. +func (m *MachineMutation) SetPassword(s string) { + m.password = &s +} + +// Password returns the value of the "password" field in the mutation. +func (m *MachineMutation) Password() (r string, exists bool) { + v := m.password + if v == nil { + return + } + return *v, true +} + +// OldPassword returns the old "password" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
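+// For example (a sketch; ctx comes from an UpdateOne hook), a rotation could
+// be detected by comparing old and new values:
+//
+//	old, err := m.OldPassword(ctx)
+//	if cur, ok := m.Password(); err == nil && ok && cur != old {
+//		log.Println("machine password changed")
+//	}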
+func (m *MachineMutation) OldPassword(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPassword is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPassword requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPassword: %w", err) + } + return oldValue.Password, nil +} + +// ResetPassword resets all changes to the "password" field. +func (m *MachineMutation) ResetPassword() { + m.password = nil +} + +// SetIpAddress sets the "ipAddress" field. +func (m *MachineMutation) SetIpAddress(s string) { + m.ipAddress = &s +} + +// IpAddress returns the value of the "ipAddress" field in the mutation. +func (m *MachineMutation) IpAddress() (r string, exists bool) { + v := m.ipAddress + if v == nil { + return + } + return *v, true +} + +// OldIpAddress returns the old "ipAddress" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldIpAddress(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIpAddress is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIpAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIpAddress: %w", err) + } + return oldValue.IpAddress, nil +} + +// ResetIpAddress resets all changes to the "ipAddress" field. +func (m *MachineMutation) ResetIpAddress() { + m.ipAddress = nil +} + +// SetScenarios sets the "scenarios" field. +func (m *MachineMutation) SetScenarios(s string) { + m.scenarios = &s +} + +// Scenarios returns the value of the "scenarios" field in the mutation. +func (m *MachineMutation) Scenarios() (r string, exists bool) { + v := m.scenarios + if v == nil { + return + } + return *v, true +} + +// OldScenarios returns the old "scenarios" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldScenarios(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScenarios is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScenarios requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScenarios: %w", err) + } + return oldValue.Scenarios, nil +} + +// ClearScenarios clears the value of the "scenarios" field. +func (m *MachineMutation) ClearScenarios() { + m.scenarios = nil + m.clearedFields[machine.FieldScenarios] = struct{}{} +} + +// ScenariosCleared returns if the "scenarios" field was cleared in this mutation. +func (m *MachineMutation) ScenariosCleared() bool { + _, ok := m.clearedFields[machine.FieldScenarios] + return ok +} + +// ResetScenarios resets all changes to the "scenarios" field. +func (m *MachineMutation) ResetScenarios() { + m.scenarios = nil + delete(m.clearedFields, machine.FieldScenarios) +} + +// SetVersion sets the "version" field. 
+func (m *MachineMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the value of the "version" field in the mutation. +func (m *MachineMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old "version" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVersion: %w", err) + } + return oldValue.Version, nil +} + +// ClearVersion clears the value of the "version" field. +func (m *MachineMutation) ClearVersion() { + m.version = nil + m.clearedFields[machine.FieldVersion] = struct{}{} +} + +// VersionCleared returns if the "version" field was cleared in this mutation. +func (m *MachineMutation) VersionCleared() bool { + _, ok := m.clearedFields[machine.FieldVersion] + return ok +} + +// ResetVersion resets all changes to the "version" field. +func (m *MachineMutation) ResetVersion() { + m.version = nil + delete(m.clearedFields, machine.FieldVersion) +} + +// SetIsValidated sets the "isValidated" field. +func (m *MachineMutation) SetIsValidated(b bool) { + m.isValidated = &b +} + +// IsValidated returns the value of the "isValidated" field in the mutation. +func (m *MachineMutation) IsValidated() (r bool, exists bool) { + v := m.isValidated + if v == nil { + return + } + return *v, true +} + +// OldIsValidated returns the old "isValidated" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldIsValidated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsValidated is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsValidated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsValidated: %w", err) + } + return oldValue.IsValidated, nil +} + +// ResetIsValidated resets all changes to the "isValidated" field. +func (m *MachineMutation) ResetIsValidated() { + m.isValidated = nil +} + +// SetStatus sets the "status" field. +func (m *MachineMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *MachineMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldStatus(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldStatus is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldStatus requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldStatus: %w", err)
+	}
+	return oldValue.Status, nil
+}
+
+// ClearStatus clears the value of the "status" field.
+func (m *MachineMutation) ClearStatus() {
+	m.status = nil
+	m.clearedFields[machine.FieldStatus] = struct{}{}
+}
+
+// StatusCleared returns if the "status" field was cleared in this mutation.
+func (m *MachineMutation) StatusCleared() bool {
+	_, ok := m.clearedFields[machine.FieldStatus]
+	return ok
+}
+
+// ResetStatus resets all changes to the "status" field.
+func (m *MachineMutation) ResetStatus() {
+	m.status = nil
+	delete(m.clearedFields, machine.FieldStatus)
+}
+
+// SetAuthType sets the "auth_type" field.
+func (m *MachineMutation) SetAuthType(s string) {
+	m.auth_type = &s
+}
+
+// AuthType returns the value of the "auth_type" field in the mutation.
+func (m *MachineMutation) AuthType() (r string, exists bool) {
+	v := m.auth_type
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldAuthType returns the old "auth_type" field's value of the Machine entity.
+// If the Machine object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MachineMutation) OldAuthType(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldAuthType is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldAuthType requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldAuthType: %w", err)
+	}
+	return oldValue.AuthType, nil
+}
+
+// ResetAuthType resets all changes to the "auth_type" field.
+func (m *MachineMutation) ResetAuthType() {
+	m.auth_type = nil
+}
+
+// AddAlertIDs adds the "alerts" edge to the Alert entity by ids.
+func (m *MachineMutation) AddAlertIDs(ids ...int) {
+	if m.alerts == nil {
+		m.alerts = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.alerts[ids[i]] = struct{}{}
+	}
+}
+
+// ClearAlerts clears the "alerts" edge to the Alert entity.
+func (m *MachineMutation) ClearAlerts() {
+	m.clearedalerts = true
+}
+
+// AlertsCleared reports if the "alerts" edge to the Alert entity was cleared.
+func (m *MachineMutation) AlertsCleared() bool {
+	return m.clearedalerts
+}
+
+// RemoveAlertIDs removes the "alerts" edge to the Alert entity by IDs.
+func (m *MachineMutation) RemoveAlertIDs(ids ...int) {
+	if m.removedalerts == nil {
+		m.removedalerts = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.alerts, ids[i])
+		m.removedalerts[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedAlertsIDs returns the removed IDs of the "alerts" edge to the Alert entity.
+func (m *MachineMutation) RemovedAlertsIDs() (ids []int) {
+	for id := range m.removedalerts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// AlertsIDs returns the "alerts" edge IDs in the mutation.
+func (m *MachineMutation) AlertsIDs() (ids []int) {
+	for id := range m.alerts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetAlerts resets all changes to the "alerts" edge.
+func (m *MachineMutation) ResetAlerts() {
+	m.alerts = nil
+	m.clearedalerts = false
+	m.removedalerts = nil
+}
+
+// Where appends a list of predicates to the MachineMutation builder.
+func (m *MachineMutation) Where(ps ...predicate.Machine) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *MachineMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Machine).
+func (m *MachineMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *MachineMutation) Fields() []string {
+	fields := make([]string, 0, 12)
+	if m.created_at != nil {
+		fields = append(fields, machine.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, machine.FieldUpdatedAt)
+	}
+	if m.last_push != nil {
+		fields = append(fields, machine.FieldLastPush)
+	}
+	if m.last_heartbeat != nil {
+		fields = append(fields, machine.FieldLastHeartbeat)
+	}
+	if m.machineId != nil {
+		fields = append(fields, machine.FieldMachineId)
+	}
+	if m.password != nil {
+		fields = append(fields, machine.FieldPassword)
+	}
+	if m.ipAddress != nil {
+		fields = append(fields, machine.FieldIpAddress)
+	}
+	if m.scenarios != nil {
+		fields = append(fields, machine.FieldScenarios)
+	}
+	if m.version != nil {
+		fields = append(fields, machine.FieldVersion)
+	}
+	if m.isValidated != nil {
+		fields = append(fields, machine.FieldIsValidated)
+	}
+	if m.status != nil {
+		fields = append(fields, machine.FieldStatus)
+	}
+	if m.auth_type != nil {
+		fields = append(fields, machine.FieldAuthType)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *MachineMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case machine.FieldCreatedAt:
+		return m.CreatedAt()
+	case machine.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case machine.FieldLastPush:
+		return m.LastPush()
+	case machine.FieldLastHeartbeat:
+		return m.LastHeartbeat()
+	case machine.FieldMachineId:
+		return m.MachineId()
+	case machine.FieldPassword:
+		return m.Password()
+	case machine.FieldIpAddress:
+		return m.IpAddress()
+	case machine.FieldScenarios:
+		return m.Scenarios()
+	case machine.FieldVersion:
+		return m.Version()
+	case machine.FieldIsValidated:
+		return m.IsValidated()
+	case machine.FieldStatus:
+		return m.Status()
+	case machine.FieldAuthType:
+		return m.AuthType()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
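+// Generic hooks can use this entry point instead of the typed getters, e.g.
+// (sketch, with ctx from the hook):
+//
+//	prev, err := m.OldField(ctx, machine.FieldIpAddress)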
+func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case machine.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case machine.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case machine.FieldLastPush: + return m.OldLastPush(ctx) + case machine.FieldLastHeartbeat: + return m.OldLastHeartbeat(ctx) + case machine.FieldMachineId: + return m.OldMachineId(ctx) + case machine.FieldPassword: + return m.OldPassword(ctx) + case machine.FieldIpAddress: + return m.OldIpAddress(ctx) + case machine.FieldScenarios: + return m.OldScenarios(ctx) + case machine.FieldVersion: + return m.OldVersion(ctx) + case machine.FieldIsValidated: + return m.OldIsValidated(ctx) + case machine.FieldStatus: + return m.OldStatus(ctx) + case machine.FieldAuthType: + return m.OldAuthType(ctx) + } + return nil, fmt.Errorf("unknown Machine field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MachineMutation) SetField(name string, value ent.Value) error { + switch name { + case machine.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case machine.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case machine.FieldLastPush: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastPush(v) + return nil + case machine.FieldLastHeartbeat: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastHeartbeat(v) + return nil + case machine.FieldMachineId: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMachineId(v) + return nil + case machine.FieldPassword: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPassword(v) + return nil + case machine.FieldIpAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIpAddress(v) + return nil + case machine.FieldScenarios: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScenarios(v) + return nil + case machine.FieldVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVersion(v) + return nil + case machine.FieldIsValidated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsValidated(v) + return nil + case machine.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case machine.FieldAuthType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAuthType(v) + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *MachineMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *MachineMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *MachineMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Machine numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *MachineMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(machine.FieldCreatedAt) {
+		fields = append(fields, machine.FieldCreatedAt)
+	}
+	if m.FieldCleared(machine.FieldUpdatedAt) {
+		fields = append(fields, machine.FieldUpdatedAt)
+	}
+	if m.FieldCleared(machine.FieldLastPush) {
+		fields = append(fields, machine.FieldLastPush)
+	}
+	if m.FieldCleared(machine.FieldLastHeartbeat) {
+		fields = append(fields, machine.FieldLastHeartbeat)
+	}
+	if m.FieldCleared(machine.FieldScenarios) {
+		fields = append(fields, machine.FieldScenarios)
+	}
+	if m.FieldCleared(machine.FieldVersion) {
+		fields = append(fields, machine.FieldVersion)
+	}
+	if m.FieldCleared(machine.FieldStatus) {
+		fields = append(fields, machine.FieldStatus)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *MachineMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *MachineMutation) ClearField(name string) error {
+	switch name {
+	case machine.FieldCreatedAt:
+		m.ClearCreatedAt()
+		return nil
+	case machine.FieldUpdatedAt:
+		m.ClearUpdatedAt()
+		return nil
+	case machine.FieldLastPush:
+		m.ClearLastPush()
+		return nil
+	case machine.FieldLastHeartbeat:
+		m.ClearLastHeartbeat()
+		return nil
+	case machine.FieldScenarios:
+		m.ClearScenarios()
+		return nil
+	case machine.FieldVersion:
+		m.ClearVersion()
+		return nil
+	case machine.FieldStatus:
+		m.ClearStatus()
+		return nil
+	}
+	return fmt.Errorf("unknown Machine nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
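+// For example, a guard hook could silently drop an unwanted change instead of
+// failing the whole mutation (a sketch; the policy itself is an assumption):
+//
+//	if _, changed := m.Field(machine.FieldPassword); changed {
+//		_ = m.ResetField(machine.FieldPassword) // discard the password change
+//	}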
+func (m *MachineMutation) ResetField(name string) error {
+	switch name {
+	case machine.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	case machine.FieldUpdatedAt:
+		m.ResetUpdatedAt()
+		return nil
+	case machine.FieldLastPush:
+		m.ResetLastPush()
+		return nil
+	case machine.FieldLastHeartbeat:
+		m.ResetLastHeartbeat()
+		return nil
+	case machine.FieldMachineId:
+		m.ResetMachineId()
+		return nil
+	case machine.FieldPassword:
+		m.ResetPassword()
+		return nil
+	case machine.FieldIpAddress:
+		m.ResetIpAddress()
+		return nil
+	case machine.FieldScenarios:
+		m.ResetScenarios()
+		return nil
+	case machine.FieldVersion:
+		m.ResetVersion()
+		return nil
+	case machine.FieldIsValidated:
+		m.ResetIsValidated()
+		return nil
+	case machine.FieldStatus:
+		m.ResetStatus()
+		return nil
+	case machine.FieldAuthType:
+		m.ResetAuthType()
+		return nil
+	}
+	return fmt.Errorf("unknown Machine field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *MachineMutation) AddedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.alerts != nil {
+		edges = append(edges, machine.EdgeAlerts)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *MachineMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case machine.EdgeAlerts:
+		ids := make([]ent.Value, 0, len(m.alerts))
+		for id := range m.alerts {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *MachineMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.removedalerts != nil {
+		edges = append(edges, machine.EdgeAlerts)
+	}
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *MachineMutation) RemovedIDs(name string) []ent.Value {
+	switch name {
+	case machine.EdgeAlerts:
+		ids := make([]ent.Value, 0, len(m.removedalerts))
+		for id := range m.removedalerts {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *MachineMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedalerts {
+		edges = append(edges, machine.EdgeAlerts)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *MachineMutation) EdgeCleared(name string) bool {
+	switch name {
+	case machine.EdgeAlerts:
+		return m.clearedalerts
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *MachineMutation) ClearEdge(name string) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Machine unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *MachineMutation) ResetEdge(name string) error {
+	switch name {
+	case machine.EdgeAlerts:
+		m.ResetAlerts()
+		return nil
+	}
+	return fmt.Errorf("unknown Machine edge %s", name)
+}
+
+// MetaMutation represents an operation that mutates the Meta nodes in the graph.
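+// It is normally produced by the Meta builders; for example (a sketch, assuming
+// an open *Client and an existing alert ID):
+//
+//	meta, err := client.Meta.Create().
+//		SetKey("source").
+//		SetValue("cscli").
+//		SetOwnerID(alertID).
+//		Save(ctx)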
+type MetaMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	created_at    *time.Time
+	updated_at    *time.Time
+	key           *string
+	value         *string
+	clearedFields map[string]struct{}
+	owner         *int
+	clearedowner  bool
+	done          bool
+	oldValue      func(context.Context) (*Meta, error)
+	predicates    []predicate.Meta
+}
+
+var _ ent.Mutation = (*MetaMutation)(nil)
+
+// metaOption allows management of the mutation configuration using functional options.
+type metaOption func(*MetaMutation)
+
+// newMetaMutation creates new mutation for the Meta entity.
+func newMetaMutation(c config, op Op, opts ...metaOption) *MetaMutation {
+	m := &MetaMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeMeta,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withMetaID sets the ID field of the mutation.
+func withMetaID(id int) metaOption {
+	return func(m *MetaMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Meta
+		)
+		m.oldValue = func(ctx context.Context) (*Meta, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Meta.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withMeta sets the old Meta of the mutation.
+func withMeta(node *Meta) metaOption {
+	return func(m *MetaMutation) {
+		m.oldValue = func(context.Context) (*Meta, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MetaMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MetaMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *MetaMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *MetaMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Meta.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *MetaMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *MetaMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Meta entity.
+// If the Meta object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ClearCreatedAt clears the value of the "created_at" field.
+func (m *MetaMutation) ClearCreatedAt() {
+	m.created_at = nil
+	m.clearedFields[meta.FieldCreatedAt] = struct{}{}
+}
+
+// CreatedAtCleared returns if the "created_at" field was cleared in this mutation.
+func (m *MetaMutation) CreatedAtCleared() bool {
+	_, ok := m.clearedFields[meta.FieldCreatedAt]
+	return ok
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *MetaMutation) ResetCreatedAt() {
+	m.created_at = nil
+	delete(m.clearedFields, meta.FieldCreatedAt)
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *MetaMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *MetaMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Meta entity.
+// If the Meta object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ClearUpdatedAt clears the value of the "updated_at" field.
+func (m *MetaMutation) ClearUpdatedAt() {
+	m.updated_at = nil
+	m.clearedFields[meta.FieldUpdatedAt] = struct{}{}
+}
+
+// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation.
+func (m *MetaMutation) UpdatedAtCleared() bool {
+	_, ok := m.clearedFields[meta.FieldUpdatedAt]
+	return ok
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *MetaMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+	delete(m.clearedFields, meta.FieldUpdatedAt)
+}
+
+// SetKey sets the "key" field.
+func (m *MetaMutation) SetKey(s string) {
+	m.key = &s
+}
+
+// Key returns the value of the "key" field in the mutation.
+func (m *MetaMutation) Key() (r string, exists bool) {
+	v := m.key
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldKey returns the old "key" field's value of the Meta entity.
+// If the Meta object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
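+// For example, an UpdateOne hook could use it to reject key renames (a sketch;
+// the immutability policy is an assumption):
+//
+//	if old, err := m.OldKey(ctx); err == nil {
+//		if newKey, ok := m.Key(); ok && newKey != old {
+//			return nil, errors.New("meta keys must not be renamed")
+//		}
+//	}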
+func (m *MetaMutation) OldKey(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldKey is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldKey requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldKey: %w", err)
+	}
+	return oldValue.Key, nil
+}
+
+// ResetKey resets all changes to the "key" field.
+func (m *MetaMutation) ResetKey() {
+	m.key = nil
+}
+
+// SetValue sets the "value" field.
+func (m *MetaMutation) SetValue(s string) {
+	m.value = &s
+}
+
+// Value returns the value of the "value" field in the mutation.
+func (m *MetaMutation) Value() (r string, exists bool) {
+	v := m.value
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldValue returns the old "value" field's value of the Meta entity.
+// If the Meta object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *MetaMutation) OldValue(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldValue is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldValue requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldValue: %w", err)
+	}
+	return oldValue.Value, nil
+}
+
+// ResetValue resets all changes to the "value" field.
+func (m *MetaMutation) ResetValue() {
+	m.value = nil
+}
+
+// SetOwnerID sets the "owner" edge to the Alert entity by id.
+func (m *MetaMutation) SetOwnerID(id int) {
+	m.owner = &id
+}
+
+// ClearOwner clears the "owner" edge to the Alert entity.
+func (m *MetaMutation) ClearOwner() {
+	m.clearedowner = true
+}
+
+// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
+func (m *MetaMutation) OwnerCleared() bool {
+	return m.clearedowner
+}
+
+// OwnerID returns the "owner" edge ID in the mutation.
+func (m *MetaMutation) OwnerID() (id int, exists bool) {
+	if m.owner != nil {
+		return *m.owner, true
+	}
+	return
+}
+
+// OwnerIDs returns the "owner" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// OwnerID instead. It exists only for internal usage by the builders.
+func (m *MetaMutation) OwnerIDs() (ids []int) {
+	if id := m.owner; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetOwner resets all changes to the "owner" edge.
+func (m *MetaMutation) ResetOwner() {
+	m.owner = nil
+	m.clearedowner = false
+}
+
+// Where appends a list of predicates to the MetaMutation builder.
+func (m *MetaMutation) Where(ps ...predicate.Meta) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// Op returns the operation name.
+func (m *MetaMutation) Op() Op {
+	return m.op
+}
+
+// Type returns the node type of this mutation (Meta).
+func (m *MetaMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
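+// For example, a generic audit hook can walk the changed fields without knowing
+// the concrete mutation type (a sketch; the logger is an assumption):
+//
+//	for _, name := range mut.Fields() {
+//		if v, ok := mut.Field(name); ok {
+//			log.Debugf("%s.%s changed to %v", mut.Type(), name, v)
+//		}
+//	}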
+func (m *MetaMutation) Fields() []string {
+	fields := make([]string, 0, 4)
+	if m.created_at != nil {
+		fields = append(fields, meta.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, meta.FieldUpdatedAt)
+	}
+	if m.key != nil {
+		fields = append(fields, meta.FieldKey)
+	}
+	if m.value != nil {
+		fields = append(fields, meta.FieldValue)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *MetaMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case meta.FieldCreatedAt:
+		return m.CreatedAt()
+	case meta.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case meta.FieldKey:
+		return m.Key()
+	case meta.FieldValue:
+		return m.Value()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *MetaMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case meta.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case meta.FieldUpdatedAt:
+		return m.OldUpdatedAt(ctx)
+	case meta.FieldKey:
+		return m.OldKey(ctx)
+	case meta.FieldValue:
+		return m.OldValue(ctx)
+	}
+	return nil, fmt.Errorf("unknown Meta field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *MetaMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case meta.FieldCreatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetCreatedAt(v)
+		return nil
+	case meta.FieldUpdatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUpdatedAt(v)
+		return nil
+	case meta.FieldKey:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetKey(v)
+		return nil
+	case meta.FieldValue:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetValue(v)
+		return nil
+	}
+	return fmt.Errorf("unknown Meta field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *MetaMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *MetaMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *MetaMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Meta numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
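+// For Meta, only created_at and updated_at are nullable, so these are the only
+// names ClearedFields can return. For example (sketch):
+//
+//	if m.FieldCleared(meta.FieldUpdatedAt) {
+//		// updated_at will be stored as NULL by this mutation
+//	}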
+func (m *MetaMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(meta.FieldCreatedAt) {
+		fields = append(fields, meta.FieldCreatedAt)
+	}
+	if m.FieldCleared(meta.FieldUpdatedAt) {
+		fields = append(fields, meta.FieldUpdatedAt)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *MetaMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *MetaMutation) ClearField(name string) error {
+	switch name {
+	case meta.FieldCreatedAt:
+		m.ClearCreatedAt()
+		return nil
+	case meta.FieldUpdatedAt:
+		m.ClearUpdatedAt()
+		return nil
+	}
+	return fmt.Errorf("unknown Meta nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *MetaMutation) ResetField(name string) error {
+	switch name {
+	case meta.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	case meta.FieldUpdatedAt:
+		m.ResetUpdatedAt()
+		return nil
+	case meta.FieldKey:
+		m.ResetKey()
+		return nil
+	case meta.FieldValue:
+		m.ResetValue()
+		return nil
+	}
+	return fmt.Errorf("unknown Meta field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *MetaMutation) AddedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.owner != nil {
+		edges = append(edges, meta.EdgeOwner)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *MetaMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case meta.EdgeOwner:
+		if id := m.owner; id != nil {
+			return []ent.Value{*id}
+		}
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *MetaMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *MetaMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *MetaMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedowner {
+		edges = append(edges, meta.EdgeOwner)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *MetaMutation) EdgeCleared(name string) bool {
+	switch name {
+	case meta.EdgeOwner:
+		return m.clearedowner
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *MetaMutation) ClearEdge(name string) error {
+	switch name {
+	case meta.EdgeOwner:
+		m.ClearOwner()
+		return nil
+	}
+	return fmt.Errorf("unknown Meta unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
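+// For example, a hook that must keep the alert association intact could undo a
+// pending owner change (a sketch; the policy is an assumption):
+//
+//	if m.OwnerCleared() || len(m.OwnerIDs()) > 0 {
+//		_ = m.ResetEdge(meta.EdgeOwner)
+//	}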
+func (m *MetaMutation) ResetEdge(name string) error {
+	switch name {
+	case meta.EdgeOwner:
+		m.ResetOwner()
+		return nil
+	}
+	return fmt.Errorf("unknown Meta edge %s", name)
+}
diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go
new file mode 100644
index 0000000..3c534cf
--- /dev/null
+++ b/pkg/database/ent/predicate/predicate.go
@@ -0,0 +1,25 @@
+// Code generated by ent, DO NOT EDIT.
+
+package predicate
+
+import (
+	"entgo.io/ent/dialect/sql"
+)
+
+// Alert is the predicate function for alert builders.
+type Alert func(*sql.Selector)
+
+// Bouncer is the predicate function for bouncer builders.
+type Bouncer func(*sql.Selector)
+
+// Decision is the predicate function for decision builders.
+type Decision func(*sql.Selector)
+
+// Event is the predicate function for event builders.
+type Event func(*sql.Selector)
+
+// Machine is the predicate function for machine builders.
+type Machine func(*sql.Selector)
+
+// Meta is the predicate function for meta builders.
+type Meta func(*sql.Selector)
diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go
new file mode 100644
index 0000000..c9bed41
--- /dev/null
+++ b/pkg/database/ent/runtime.go
@@ -0,0 +1,181 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/schema"
+)
+
+// The init function reads all schema descriptors with runtime code
+// (default values, validators, hooks and policies) and stitches them
+// to their package variables.
+func init() {
+	alertFields := schema.Alert{}.Fields()
+	_ = alertFields
+	// alertDescCreatedAt is the schema descriptor for created_at field.
+	alertDescCreatedAt := alertFields[0].Descriptor()
+	// alert.DefaultCreatedAt holds the default value on creation for the created_at field.
+	alert.DefaultCreatedAt = alertDescCreatedAt.Default.(func() time.Time)
+	// alert.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	alert.UpdateDefaultCreatedAt = alertDescCreatedAt.UpdateDefault.(func() time.Time)
+	// alertDescUpdatedAt is the schema descriptor for updated_at field.
+	alertDescUpdatedAt := alertFields[1].Descriptor()
+	// alert.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	alert.DefaultUpdatedAt = alertDescUpdatedAt.Default.(func() time.Time)
+	// alert.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	alert.UpdateDefaultUpdatedAt = alertDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// alertDescBucketId is the schema descriptor for bucketId field.
+	alertDescBucketId := alertFields[3].Descriptor()
+	// alert.DefaultBucketId holds the default value on creation for the bucketId field.
+	alert.DefaultBucketId = alertDescBucketId.Default.(string)
+	// alertDescMessage is the schema descriptor for message field.
+	alertDescMessage := alertFields[4].Descriptor()
+	// alert.DefaultMessage holds the default value on creation for the message field.
+	alert.DefaultMessage = alertDescMessage.Default.(string)
+	// alertDescEventsCount is the schema descriptor for eventsCount field.
+	alertDescEventsCount := alertFields[5].Descriptor()
+	// alert.DefaultEventsCount holds the default value on creation for the eventsCount field.
+	alert.DefaultEventsCount = alertDescEventsCount.Default.(int32)
+	// alertDescStartedAt is the schema descriptor for startedAt field.
+	alertDescStartedAt := alertFields[6].Descriptor()
+	// alert.DefaultStartedAt holds the default value on creation for the startedAt field.
+	alert.DefaultStartedAt = alertDescStartedAt.Default.(func() time.Time)
+	// alertDescStoppedAt is the schema descriptor for stoppedAt field.
+	alertDescStoppedAt := alertFields[7].Descriptor()
+	// alert.DefaultStoppedAt holds the default value on creation for the stoppedAt field.
+	alert.DefaultStoppedAt = alertDescStoppedAt.Default.(func() time.Time)
+	// alertDescSimulated is the schema descriptor for simulated field.
+	alertDescSimulated := alertFields[21].Descriptor()
+	// alert.DefaultSimulated holds the default value on creation for the simulated field.
+	alert.DefaultSimulated = alertDescSimulated.Default.(bool)
+	bouncerFields := schema.Bouncer{}.Fields()
+	_ = bouncerFields
+	// bouncerDescCreatedAt is the schema descriptor for created_at field.
+	bouncerDescCreatedAt := bouncerFields[0].Descriptor()
+	// bouncer.DefaultCreatedAt holds the default value on creation for the created_at field.
+	bouncer.DefaultCreatedAt = bouncerDescCreatedAt.Default.(func() time.Time)
+	// bouncer.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	bouncer.UpdateDefaultCreatedAt = bouncerDescCreatedAt.UpdateDefault.(func() time.Time)
+	// bouncerDescUpdatedAt is the schema descriptor for updated_at field.
+	bouncerDescUpdatedAt := bouncerFields[1].Descriptor()
+	// bouncer.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	bouncer.DefaultUpdatedAt = bouncerDescUpdatedAt.Default.(func() time.Time)
+	// bouncer.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	bouncer.UpdateDefaultUpdatedAt = bouncerDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// bouncerDescIPAddress is the schema descriptor for ip_address field.
+	bouncerDescIPAddress := bouncerFields[5].Descriptor()
+	// bouncer.DefaultIPAddress holds the default value on creation for the ip_address field.
+	bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string)
+	// bouncerDescUntil is the schema descriptor for until field.
+	bouncerDescUntil := bouncerFields[8].Descriptor()
+	// bouncer.DefaultUntil holds the default value on creation for the until field.
+	bouncer.DefaultUntil = bouncerDescUntil.Default.(func() time.Time)
+	// bouncerDescLastPull is the schema descriptor for last_pull field.
+	bouncerDescLastPull := bouncerFields[9].Descriptor()
+	// bouncer.DefaultLastPull holds the default value on creation for the last_pull field.
+	bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time)
+	// bouncerDescAuthType is the schema descriptor for auth_type field.
+	bouncerDescAuthType := bouncerFields[10].Descriptor()
+	// bouncer.DefaultAuthType holds the default value on creation for the auth_type field.
+	bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string)
+	decisionFields := schema.Decision{}.Fields()
+	_ = decisionFields
+	// decisionDescCreatedAt is the schema descriptor for created_at field.
+	decisionDescCreatedAt := decisionFields[0].Descriptor()
+	// decision.DefaultCreatedAt holds the default value on creation for the created_at field.
+	decision.DefaultCreatedAt = decisionDescCreatedAt.Default.(func() time.Time)
+	// decision.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	decision.UpdateDefaultCreatedAt = decisionDescCreatedAt.UpdateDefault.(func() time.Time)
+	// decisionDescUpdatedAt is the schema descriptor for updated_at field.
+	decisionDescUpdatedAt := decisionFields[1].Descriptor()
+	// decision.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	decision.DefaultUpdatedAt = decisionDescUpdatedAt.Default.(func() time.Time)
+	// decision.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	decision.UpdateDefaultUpdatedAt = decisionDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// decisionDescSimulated is the schema descriptor for simulated field.
+	decisionDescSimulated := decisionFields[13].Descriptor()
+	// decision.DefaultSimulated holds the default value on creation for the simulated field.
+	decision.DefaultSimulated = decisionDescSimulated.Default.(bool)
+	eventFields := schema.Event{}.Fields()
+	_ = eventFields
+	// eventDescCreatedAt is the schema descriptor for created_at field.
+	eventDescCreatedAt := eventFields[0].Descriptor()
+	// event.DefaultCreatedAt holds the default value on creation for the created_at field.
+	event.DefaultCreatedAt = eventDescCreatedAt.Default.(func() time.Time)
+	// event.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	event.UpdateDefaultCreatedAt = eventDescCreatedAt.UpdateDefault.(func() time.Time)
+	// eventDescUpdatedAt is the schema descriptor for updated_at field.
+	eventDescUpdatedAt := eventFields[1].Descriptor()
+	// event.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	event.DefaultUpdatedAt = eventDescUpdatedAt.Default.(func() time.Time)
+	// event.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	event.UpdateDefaultUpdatedAt = eventDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// eventDescSerialized is the schema descriptor for serialized field.
+	eventDescSerialized := eventFields[3].Descriptor()
+	// event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save.
+	event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error)
+	machineFields := schema.Machine{}.Fields()
+	_ = machineFields
+	// machineDescCreatedAt is the schema descriptor for created_at field.
+	machineDescCreatedAt := machineFields[0].Descriptor()
+	// machine.DefaultCreatedAt holds the default value on creation for the created_at field.
+	machine.DefaultCreatedAt = machineDescCreatedAt.Default.(func() time.Time)
+	// machine.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	machine.UpdateDefaultCreatedAt = machineDescCreatedAt.UpdateDefault.(func() time.Time)
+	// machineDescUpdatedAt is the schema descriptor for updated_at field.
+	machineDescUpdatedAt := machineFields[1].Descriptor()
+	// machine.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	machine.DefaultUpdatedAt = machineDescUpdatedAt.Default.(func() time.Time)
+	// machine.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	machine.UpdateDefaultUpdatedAt = machineDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// machineDescLastPush is the schema descriptor for last_push field.
+	machineDescLastPush := machineFields[2].Descriptor()
+	// machine.DefaultLastPush holds the default value on creation for the last_push field.
+	machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time)
+	// machine.UpdateDefaultLastPush holds the default value on update for the last_push field.
+	machine.UpdateDefaultLastPush = machineDescLastPush.UpdateDefault.(func() time.Time)
+	// machineDescLastHeartbeat is the schema descriptor for last_heartbeat field.
+	machineDescLastHeartbeat := machineFields[3].Descriptor()
+	// machine.DefaultLastHeartbeat holds the default value on creation for the last_heartbeat field.
+	machine.DefaultLastHeartbeat = machineDescLastHeartbeat.Default.(func() time.Time)
+	// machine.UpdateDefaultLastHeartbeat holds the default value on update for the last_heartbeat field.
+	machine.UpdateDefaultLastHeartbeat = machineDescLastHeartbeat.UpdateDefault.(func() time.Time)
+	// machineDescScenarios is the schema descriptor for scenarios field.
+	machineDescScenarios := machineFields[7].Descriptor()
+	// machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save.
+	machine.ScenariosValidator = machineDescScenarios.Validators[0].(func(string) error)
+	// machineDescIsValidated is the schema descriptor for isValidated field.
+	machineDescIsValidated := machineFields[9].Descriptor()
+	// machine.DefaultIsValidated holds the default value on creation for the isValidated field.
+	machine.DefaultIsValidated = machineDescIsValidated.Default.(bool)
+	// machineDescAuthType is the schema descriptor for auth_type field.
+	machineDescAuthType := machineFields[11].Descriptor()
+	// machine.DefaultAuthType holds the default value on creation for the auth_type field.
+	machine.DefaultAuthType = machineDescAuthType.Default.(string)
+	metaFields := schema.Meta{}.Fields()
+	_ = metaFields
+	// metaDescCreatedAt is the schema descriptor for created_at field.
+	metaDescCreatedAt := metaFields[0].Descriptor()
+	// meta.DefaultCreatedAt holds the default value on creation for the created_at field.
+	meta.DefaultCreatedAt = metaDescCreatedAt.Default.(func() time.Time)
+	// meta.UpdateDefaultCreatedAt holds the default value on update for the created_at field.
+	meta.UpdateDefaultCreatedAt = metaDescCreatedAt.UpdateDefault.(func() time.Time)
+	// metaDescUpdatedAt is the schema descriptor for updated_at field.
+	metaDescUpdatedAt := metaFields[1].Descriptor()
+	// meta.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	meta.DefaultUpdatedAt = metaDescUpdatedAt.Default.(func() time.Time)
+	// meta.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	meta.UpdateDefaultUpdatedAt = metaDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// metaDescValue is the schema descriptor for value field.
+	metaDescValue := metaFields[3].Descriptor()
+	// meta.ValueValidator is a validator for the "value" field. It is called by the builders before save.
+	meta.ValueValidator = metaDescValue.Validators[0].(func(string) error)
+}
diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go
new file mode 100644
index 0000000..e64f7bd
--- /dev/null
+++ b/pkg/database/ent/runtime/runtime.go
@@ -0,0 +1,10 @@
+// Code generated by ent, DO NOT EDIT.
+
+package runtime
+
+// The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go
+
+const (
+	Version = "v0.11.3" // Version of ent codegen.
+	Sum = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen.
+)
diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go
new file mode 100644
index 0000000..1cd6bbb
--- /dev/null
+++ b/pkg/database/ent/schema/alert.go
@@ -0,0 +1,81 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Alert holds the schema definition for the Alert entity.
+type Alert struct {
+	ent.Schema
+}
+
+// Fields of the Alert.
+func (Alert) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.String("scenario"),
+		field.String("bucketId").Default("").Optional(),
+		field.String("message").Default("").Optional(),
+		field.Int32("eventsCount").Default(0).Optional(),
+		field.Time("startedAt").Default(types.UtcNow).Optional(),
+		field.Time("stoppedAt").Default(types.UtcNow).Optional(),
+		field.String("sourceIp").
+			Optional(),
+		field.String("sourceRange").
+			Optional(),
+		field.String("sourceAsNumber").
+			Optional(),
+		field.String("sourceAsName").
+			Optional(),
+		field.String("sourceCountry").
+			Optional(),
+		field.Float32("sourceLatitude").
+			Optional(),
+		field.Float32("sourceLongitude").
+			Optional(),
+		field.String("sourceScope").Optional(),
+		field.String("sourceValue").Optional(),
+		field.Int32("capacity").Optional(),
+		field.String("leakSpeed").Optional(),
+		field.String("scenarioVersion").Optional(),
+		field.String("scenarioHash").Optional(),
+		field.Bool("simulated").Default(false),
+	}
+}
+
+// Edges of the Alert.
+func (Alert) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("owner", Machine.Type).
+			Ref("alerts").
+			Unique(),
+		edge.To("decisions", Decision.Type).
+			Annotations(entsql.Annotation{
+				OnDelete: entsql.Cascade,
+			}),
+		edge.To("events", Event.Type).
+			Annotations(entsql.Annotation{
+				OnDelete: entsql.Cascade,
+			}),
+		edge.To("metas", Meta.Type).
+			Annotations(entsql.Annotation{
+				OnDelete: entsql.Cascade,
+			}),
+	}
+}
+
+func (Alert) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("id"),
+	}
+}
diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go
new file mode 100644
index 0000000..c308129
--- /dev/null
+++ b/pkg/database/ent/schema/bouncer.go
@@ -0,0 +1,39 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Bouncer holds the schema definition for the Bouncer entity.
+type Bouncer struct {
+	ent.Schema
+}
+
+// Fields of the Bouncer.
+func (Bouncer) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`),
+		field.String("name").Unique().StructTag(`json:"name"`),
+		field.String("api_key").StructTag(`json:"api_key"`), // hash of api_key
+		field.Bool("revoked").StructTag(`json:"revoked"`),
+		field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`),
+		field.String("type").Optional().StructTag(`json:"type"`),
+		field.String("version").Optional().StructTag(`json:"version"`),
+		field.Time("until").Default(types.UtcNow).Optional().StructTag(`json:"until"`),
+		field.Time("last_pull").
+			Default(types.UtcNow).StructTag(`json:"last_pull"`),
+		field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType),
+	}
+}
+
+// Edges of the Bouncer.
+func (Bouncer) Edges() []ent.Edge {
+	return nil
+}
diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go
new file mode 100644
index 0000000..d89603a
--- /dev/null
+++ b/pkg/database/ent/schema/decision.go
@@ -0,0 +1,58 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Decision holds the schema definition for the Decision entity.
+type Decision struct {
+	ent.Schema
+}
+
+// Fields of the Decision.
+func (Decision) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("until").Nillable().Optional().SchemaType(map[string]string{
+			dialect.MySQL: "datetime",
+		}),
+		field.String("scenario"),
+		field.String("type"),
+		field.Int64("start_ip").Optional(),
+		field.Int64("end_ip").Optional(),
+		field.Int64("start_suffix").Optional(),
+		field.Int64("end_suffix").Optional(),
+		field.Int64("ip_size").Optional(),
+		field.String("scope"),
+		field.String("value"),
+		field.String("origin"),
+		field.Bool("simulated").Default(false),
+	}
+}
+
+// Edges of the Decision.
+func (Decision) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("owner", Alert.Type).
+			Ref("decisions").
+			Unique(),
+	}
+}
+
+func (Decision) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("start_ip", "end_ip"),
+		index.Fields("value"),
+		index.Fields("until"),
+	}
+}
diff --git a/pkg/database/ent/schema/event.go b/pkg/database/ent/schema/event.go
new file mode 100644
index 0000000..f312940
--- /dev/null
+++ b/pkg/database/ent/schema/event.go
@@ -0,0 +1,36 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Event holds the schema definition for the Event entity.
+type Event struct {
+	ent.Schema
+}
+
+// Fields of the Event.
+func (Event) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("time"),
+		field.String("serialized").MaxLen(8191),
+	}
+}
+
+// Edges of the Event.
+func (Event) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("owner", Alert.Type).
+			Ref("events").
+			Unique(),
+	}
+}
diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go
new file mode 100644
index 0000000..f711bc6
--- /dev/null
+++ b/pkg/database/ent/schema/machine.go
@@ -0,0 +1,47 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Machine holds the schema definition for the Machine entity.
+type Machine struct {
+	ent.Schema
+}
+
+// Fields of the Machine.
+func (Machine) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("last_push").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("last_heartbeat").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.String("machineId").Unique(),
+		field.String("password").Sensitive(),
+		field.String("ipAddress"),
+		field.String("scenarios").MaxLen(4095).Optional(),
+		field.String("version").Optional(),
+		field.Bool("isValidated").
+			Default(false),
+		field.String("status").Optional(),
+		field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`),
+	}
+}
+
+// Edges of the Machine.
+func (Machine) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("alerts", Alert.Type),
+	}
+}
diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go
new file mode 100644
index 0000000..121e7a2
--- /dev/null
+++ b/pkg/database/ent/schema/meta.go
@@ -0,0 +1,36 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+// Meta holds the schema definition for the Meta entity.
+type Meta struct {
+	ent.Schema
+}
+
+// Fields of the Meta.
+func (Meta) Fields() []ent.Field {
+	return []ent.Field{
+		field.Time("created_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.Time("updated_at").
+			Default(types.UtcNow).
+			UpdateDefault(types.UtcNow).Nillable().Optional(),
+		field.String("key"),
+		field.String("value").MaxLen(4095),
+	}
+}
+
+// Edges of the Meta.
+func (Meta) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("owner", Alert.Type).
+			Ref("metas").
+			Unique(),
+	}
+}
diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go
new file mode 100644
index 0000000..b44fecd
--- /dev/null
+++ b/pkg/database/ent/tx.go
@@ -0,0 +1,225 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"sync"
+
+	"entgo.io/ent/dialect"
+)
+
+// Tx is a transactional client that is created by calling Client.Tx().
+type Tx struct {
+	config
+	// Alert is the client for interacting with the Alert builders.
+	Alert *AlertClient
+	// Bouncer is the client for interacting with the Bouncer builders.
+	Bouncer *BouncerClient
+	// Decision is the client for interacting with the Decision builders.
+	Decision *DecisionClient
+	// Event is the client for interacting with the Event builders.
+	Event *EventClient
+	// Machine is the client for interacting with the Machine builders.
+	Machine *MachineClient
+	// Meta is the client for interacting with the Meta builders.
+	Meta *MetaClient
+
+	// lazily loaded.
+	client     *Client
+	clientOnce sync.Once
+
+	// completion callbacks.
+	mu         sync.Mutex
+	onCommit   []CommitHook
+	onRollback []RollbackHook
+
+	// ctx lives for the life of the transaction. It is
+	// the same context used by the underlying connection.
+	ctx context.Context
+}
+
+type (
+	// Committer is the interface that wraps the Commit method.
+	Committer interface {
+		Commit(context.Context, *Tx) error
+	}
+
+	// The CommitFunc type is an adapter to allow the use of an ordinary
+	// function as a Committer. If f is a function with the appropriate
+	// signature, CommitFunc(f) is a Committer that calls f.
+	CommitFunc func(context.Context, *Tx) error
+
+	// CommitHook defines the "commit middleware". A function that gets a Committer
+	// and returns a Committer. For example:
+	//
+	//	hook := func(next ent.Committer) ent.Committer {
+	//		return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Commit(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	CommitHook func(Committer) Committer
+)
+
+// Commit calls f(ctx, m).
+func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Commit commits the transaction.
+func (tx *Tx) Commit() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Committer = CommitFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Commit()
+	})
+	tx.mu.Lock()
+	hooks := append([]CommitHook(nil), tx.onCommit...)
+	tx.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Commit(tx.ctx, tx)
+}
+
+// OnCommit adds a hook to call on commit.
+func (tx *Tx) OnCommit(f CommitHook) {
+	tx.mu.Lock()
+	defer tx.mu.Unlock()
+	tx.onCommit = append(tx.onCommit, f)
+}
+
+type (
+	// Rollbacker is the interface that wraps the Rollback method.
+	Rollbacker interface {
+		Rollback(context.Context, *Tx) error
+	}
+
+	// The RollbackFunc type is an adapter to allow the use of an ordinary
+	// function as a Rollbacker. If f is a function with the appropriate
+	// signature, RollbackFunc(f) is a Rollbacker that calls f.
+	RollbackFunc func(context.Context, *Tx) error
+
+	// RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
+	// and returns a Rollbacker. For example:
+	//
+	//	hook := func(next ent.Rollbacker) ent.Rollbacker {
+	//		return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Rollback(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	RollbackHook func(Rollbacker) Rollbacker
)
+
+// Rollback calls f(ctx, m).
+func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Rollback rolls back the transaction.
+func (tx *Tx) Rollback() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Rollback()
+	})
+	tx.mu.Lock()
+	hooks := append([]RollbackHook(nil), tx.onRollback...)
+	tx.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Rollback(tx.ctx, tx)
+}
+
+// OnRollback adds a hook to call on rollback.
+func (tx *Tx) OnRollback(f RollbackHook) {
+	tx.mu.Lock()
+	defer tx.mu.Unlock()
+	tx.onRollback = append(tx.onRollback, f)
+}
+
+// Client returns a Client that binds to current transaction.
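+// This lets transaction-unaware code run inside an open transaction. A sketch
+// (doSomething is an assumption, not part of this file):
+//
+//	tx, err := client.Tx(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	if err := doSomething(ctx, tx.Client()); err != nil {
+//		return err // queries above ran on the same underlying transaction
+//	}
+//	return tx.Commit()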
+func (tx *Tx) Client() *Client {
+	tx.clientOnce.Do(func() {
+		tx.client = &Client{config: tx.config}
+		tx.client.init()
+	})
+	return tx.client
+}
+
+func (tx *Tx) init() {
+	tx.Alert = NewAlertClient(tx.config)
+	tx.Bouncer = NewBouncerClient(tx.config)
+	tx.Decision = NewDecisionClient(tx.config)
+	tx.Event = NewEventClient(tx.config)
+	tx.Machine = NewMachineClient(tx.config)
+	tx.Meta = NewMetaClient(tx.config)
+}
+
+// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
+// The idea is to support transactions without adding any extra code to the builders.
+// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance.
+// Commit and Rollback are nop for the internal builders and the user must call one
+// of them in order to commit or roll back the transaction.
+//
+// If a closed transaction is embedded in one of the generated entities, and the entity
+// applies a query, for example: Alert.QueryXXX(), the query will be executed
+// through the driver which created this transaction.
+//
+// Note that txDriver is not goroutine safe.
+type txDriver struct {
+	// the driver we started the transaction from.
+	drv dialect.Driver
+	// tx is the underlying transaction.
+	tx dialect.Tx
+}
+
+// newTx creates a new transactional driver.
+func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) {
+	tx, err := drv.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &txDriver{tx: tx, drv: drv}, nil
+}
+
+// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls
+// from the internal builders. Should be called only by the internal builders.
+func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }
+
+// Dialect returns the dialect of the driver we started the transaction from.
+func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }
+
+// Close is a nop close.
+func (*txDriver) Close() error { return nil }
+
+// Commit is a nop commit for the internal builders.
+// User must call `Tx.Commit` in order to commit the transaction.
+func (*txDriver) Commit() error { return nil }
+
+// Rollback is a nop rollback for the internal builders.
+// User must call `Tx.Rollback` in order to roll back the transaction.
+func (*txDriver) Rollback() error { return nil }
+
+// Exec calls tx.Exec.
+func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Exec(ctx, query, args, v)
+}
+
+// Query calls tx.Query.
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Query(ctx, query, args, v)
+}
+
+var _ dialect.Driver = (*txDriver)(nil)
diff --git a/pkg/database/errors.go b/pkg/database/errors.go
new file mode 100644
index 0000000..8e96f52
--- /dev/null
+++ b/pkg/database/errors.go
@@ -0,0 +1,22 @@
+package database
+
+import "errors"
+
+var (
+	UserExists        = errors.New("user already exists")
+	UserNotExists     = errors.New("user doesn't exist")
+	HashError         = errors.New("unable to hash")
+	InsertFail        = errors.New("unable to insert row")
+	QueryFail         = errors.New("unable to query")
+	UpdateFail        = errors.New("unable to update")
+	DeleteFail        = errors.New("unable to delete")
+	ItemNotFound      = errors.New("object not found")
+	ParseTimeFail     = errors.New("unable to parse time")
+	ParseDurationFail = errors.New("unable to parse duration")
+	MarshalFail       = errors.New("unable to marshal")
+	UnmarshalFail     = errors.New("unable to unmarshal")
+	BulkError         = errors.New("unable to insert bulk")
+	ParseType         = errors.New("unable to parse type")
+	InvalidIPOrRange  = errors.New("invalid ip address / range")
+	InvalidFilter     = errors.New("invalid filter")
+)
diff --git a/pkg/database/file_utils.go b/pkg/database/file_utils.go
new file mode 100644
index 0000000..e2e9679
--- /dev/null
+++ b/pkg/database/file_utils.go
@@ -0,0 +1,12 @@
+//go:build !windows
+
+package database
+
+import (
+	"io/fs"
+	"os"
+)
+
+func setFilePerm(path string, mode fs.FileMode) error {
+	return os.Chmod(path, mode)
+}
diff --git a/pkg/database/file_utils_windows.go b/pkg/database/file_utils_windows.go
new file mode 100644
index 0000000..a3a929c
--- /dev/null
+++ b/pkg/database/file_utils_windows.go
@@ -0,0 +1,79 @@
+package database
+
+import (
+	"fmt"
+	"io/fs"
+
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/sys/windows"
+)
+
+func setFilePerm(path string, mode fs.FileMode) error {
+	// On Windows we don't care about the mode; just make sure the file is only
+	// readable/writable by the owner and group.
+
+	sd, err := windows.GetNamedSecurityInfo(path, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION)
+	if err != nil {
+		return fmt.Errorf("while getting security info: %w", err)
+	}
+
+	currentOwner, defaulted, err := sd.Owner()
+
+	if err != nil {
+		return fmt.Errorf("while getting owner: %w", err)
+	}
+
+	log.Debugf("current owner is %s (%v) (defaulted: %v)", currentOwner.String(), currentOwner, defaulted)
+
+	currentGroup, defaulted, err := sd.Group()
+
+	if err != nil {
+		return fmt.Errorf("while getting group: %w", err)
+	}
+
+	if currentGroup == nil {
+		log.Debugf("current group is nil (defaulted: %v), using builtin admin instead", defaulted)
+		currentGroup, err = windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
+		if err != nil {
+			return fmt.Errorf("while creating admin SID: %w", err)
+		}
+	}
+
+	log.Debugf("current group is %s (%v) (defaulted: %v)", currentGroup.String(), currentGroup, defaulted)
+
+	dacl, err := windows.ACLFromEntries(
+		[]windows.EXPLICIT_ACCESS{
+			{
+				AccessPermissions: windows.GENERIC_ALL,
+				AccessMode:        windows.GRANT_ACCESS,
+				Inheritance:       windows.NO_INHERITANCE,
+				Trustee: windows.TRUSTEE{
+					MultipleTrusteeOperation: windows.NO_MULTIPLE_TRUSTEE,
+					TrusteeForm:              windows.TRUSTEE_IS_SID,
+					TrusteeType:              windows.TRUSTEE_IS_USER,
+					TrusteeValue:             windows.TrusteeValueFromSID(currentOwner),
+				},
+			},
+			{
+				AccessPermissions: windows.GENERIC_ALL,
+				AccessMode:        windows.GRANT_ACCESS,
+				Inheritance:       windows.NO_INHERITANCE,
+				Trustee: windows.TRUSTEE{
+					MultipleTrusteeOperation: windows.NO_MULTIPLE_TRUSTEE,
+					TrusteeForm:              windows.TRUSTEE_IS_SID,
+					TrusteeType:              windows.TRUSTEE_IS_GROUP,
+					TrusteeValue:             windows.TrusteeValueFromSID(currentGroup),
+				},
+			},
+		}, nil)
+
+	if err != nil {
+		return fmt.Errorf("while creating ACL: %w", err)
+	}
+
+	err = windows.SetNamedSecurityInfo(path, windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION|windows.PROTECTED_DACL_SECURITY_INFORMATION, nil, nil, dacl, nil)
+
+	if err != nil {
+		return fmt.Errorf("while setting security info: %w", err)
+	}
+	return nil
+}
diff --git a/pkg/database/machines.go b/pkg/database/machines.go
new file mode 100644
index 0000000..444d726
--- /dev/null
+++ b/pkg/database/machines.go
@@ -0,0 +1,185 @@
+package database
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-openapi/strfmt"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent"
+	"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/bcrypt"
+)
+
+const CapiMachineID = "CAPI"
+const CapiListsMachineID = "lists"
+
+func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) {
+	hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost)
+	if err != nil {
+		c.Log.Warningf("CreateMachine : %s", err)
+		return nil, errors.Wrap(HashError, "")
+	}
+
+	machineExist, err := c.Ent.Machine.
+		Query().
+		Where(machine.MachineIdEQ(*machineID)).
+		Select(machine.FieldMachineId).Strings(c.CTX)
+	if err != nil {
+		return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err)
+	}
+	if len(machineExist) > 0 {
+		if force {
+			_, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX)
+			if err != nil {
+				c.Log.Warningf("CreateMachine : %s", err)
+				return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID)
+			}
+			machine, err := c.QueryMachineByID(*machineID)
+			if err != nil {
+				return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err)
+			}
+			return machine, nil
+		}
+		return nil, errors.Wrapf(UserExists, "user '%s'", *machineID)
+	}
+
+	machine, err := c.Ent.Machine.
+		Create().
+		SetMachineId(*machineID).
+		SetPassword(string(hashPassword)).
+		SetIpAddress(ipAddress).
+		SetIsValidated(isValidated).
+		SetAuthType(authType).
+		Save(c.CTX)
+
+	if err != nil {
+		c.Log.Warningf("CreateMachine : %s", err)
+		return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID)
+	}
+
+	return machine, nil
+}
+
+func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) {
+	machine, err := c.Ent.Machine.
+		Query().
+		Where(machine.MachineIdEQ(machineID)).
+ Only(c.CTX) + if err != nil { + c.Log.Warningf("QueryMachineByID : %s", err) + return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) + } + return machine, nil +} + +func (c *Client) ListMachines() ([]*ent.Machine, error) { + machines, err := c.Ent.Machine.Query().All(c.CTX) + if err != nil { + return []*ent.Machine{}, errors.Wrapf(QueryFail, "listing machines: %s", err) + } + return machines, nil +} + +func (c *Client) ValidateMachine(machineID string) error { + rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(c.CTX) + if err != nil { + return errors.Wrapf(UpdateFail, "validating machine: %s", err) + } + if rets == 0 { + return fmt.Errorf("machine not found") + } + return nil +} + +func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { + var machines []*ent.Machine + var err error + + machines, err = c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) + if err != nil { + c.Log.Warningf("QueryPendingMachine : %s", err) + return []*ent.Machine{}, errors.Wrapf(QueryFail, "querying pending machines: %s", err) + } + return machines, nil +} + +func (c *Client) DeleteWatcher(name string) error { + nbDeleted, err := c.Ent.Machine. + Delete(). + Where(machine.MachineIdEQ(name)). + Exec(c.CTX) + if err != nil { + return err + } + + if nbDeleted == 0 { + return fmt.Errorf("machine doesn't exist") + } + + return nil +} + +func (c *Client) UpdateMachineLastPush(machineID string) error { + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastPush(time.Now().UTC()).Save(c.CTX) + if err != nil { + return errors.Wrapf(UpdateFail, "updating machine last_push: %s", err) + } + return nil +} + +func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) + if err != nil { + return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) + } + return nil +} + +func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { + _, err := c.Ent.Machine.UpdateOneID(ID). + SetUpdatedAt(time.Now().UTC()). + SetScenarios(scenarios). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update machine in database: %s", err) + } + return nil +} + +func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { + _, err := c.Ent.Machine.UpdateOneID(ID). + SetIpAddress(ipAddr). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update machine IP in database: %s", err) + } + return nil +} + +func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { + _, err := c.Ent.Machine.UpdateOneID(ID). + SetVersion(ipAddr). 
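+		// Note: despite its name, ipAddr carries the version string here; the
+		// parameter name looks copied over from UpdateMachineIP.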
+		Save(c.CTX)
+	if err != nil {
+		return fmt.Errorf("unable to update machine version in database: %s", err)
+	}
+	return nil
+}
+
+func (c *Client) IsMachineRegistered(machineID string) (bool, error) {
+	exist, err := c.Ent.Machine.Query().Where(machine.MachineIdEQ(machineID)).Select(machine.FieldMachineId).Strings(c.CTX)
+	if err != nil {
+		return false, err
+	}
+	if len(exist) == 1 {
+		return true, nil
+	}
+	if len(exist) > 1 {
+		return false, fmt.Errorf("more than one machine with the same machineID in database")
+	}
+
+	return false, nil
+
+}
diff --git a/pkg/database/utils.go b/pkg/database/utils.go
new file mode 100644
index 0000000..5d6d4a4
--- /dev/null
+++ b/pkg/database/utils.go
@@ -0,0 +1,65 @@
+package database
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+)
+
+func IP2Int(ip net.IP) uint32 {
+	if len(ip) == 16 {
+		return binary.BigEndian.Uint32(ip[12:16])
+	}
+	return binary.BigEndian.Uint32(ip)
+}
+
+func Int2ip(nn uint32) net.IP {
+	ip := make(net.IP, 4)
+	binary.BigEndian.PutUint32(ip, nn)
+	return ip
+}
+
+func IsIpv4(host string) bool {
+	return net.ParseIP(host) != nil
+}
+
+// Stolen from: https://github.com/llimllib/ipaddress/
+// Return the final address of a net range. Convert to IPv4 if possible,
+// otherwise return an IPv6.
+func LastAddress(n *net.IPNet) net.IP {
+	ip := n.IP.To4()
+	if ip == nil {
+		ip = n.IP
+		return net.IP{
+			ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2],
+			ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5],
+			ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8],
+			ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11],
+			ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14],
+			ip[15] | ^n.Mask[15]}
+	}
+
+	return net.IPv4(
+		ip[0]|^n.Mask[0],
+		ip[1]|^n.Mask[1],
+		ip[2]|^n.Mask[2],
+		ip[3]|^n.Mask[3])
+}
+
+func GetIpsFromIpRange(host string) (int64, int64, error) {
+	var ipStart int64
+	var ipEnd int64
+	var err error
+	var parsedRange *net.IPNet
+
+	if _, parsedRange, err = net.ParseCIDR(host); err != nil {
+		return ipStart, ipEnd, fmt.Errorf("'%s' is not a valid CIDR", host)
+	}
+	if parsedRange == nil {
+		return ipStart, ipEnd, fmt.Errorf("unable to parse network '%s'", host)
+	}
+	ipStart = int64(IP2Int(parsedRange.IP))
+	ipEnd = int64(IP2Int(LastAddress(parsedRange)))
+
+	return ipStart, ipEnd, nil
+}
diff --git a/pkg/exprhelpers/exprlib.go b/pkg/exprhelpers/exprlib.go
new file mode 100644
index 0000000..26d29b5
--- /dev/null
+++ b/pkg/exprhelpers/exprlib.go
@@ -0,0 +1,292 @@
+package exprhelpers
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/c-robinson/iplib"
+
+	"github.com/crowdsecurity/crowdsec/pkg/database"
+	"github.com/davecgh/go-spew/spew"
+	log "github.com/sirupsen/logrus"
+)
+
+var dataFile map[string][]string
+var dataFileRegex map[string][]*regexp.Regexp
+var dbClient *database.Client
+
+func Atof(x string) float64 {
+	log.Debugf("debug atof %s", x)
+	ret, err := strconv.ParseFloat(x, 64)
+	if err != nil {
+		log.Warningf("Atof: can't convert '%s' to float: %v", x, err)
+	}
+	return ret
+}
+
+func Upper(s string) string {
+	return strings.ToUpper(s)
+}
+
+func Lower(s string) string {
+	return strings.ToLower(s)
+}
+
+func GetExprEnv(ctx map[string]interface{}) map[string]interface{} {
+	var ExprLib = map[string]interface{}{
+		"Atof":                Atof,
+		"JsonExtract":         JsonExtract,
+		"JsonExtractUnescape": JsonExtractUnescape,
+		"JsonExtractLib":      JsonExtractLib,
+		"JsonExtractSlice":    JsonExtractSlice,
+		"JsonExtractObject":   JsonExtractObject,
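+		// Illustrative use from a parser or scenario filter (the helper names
+		// are the keys registered in this map; the event fields are hypothetical):
+		//   filter: Upper(evt.Parsed.program) == "SSHD"
+		//   filter: JsonExtract(evt.Line.Raw, "log.level") == "error"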
"ToJsonString": ToJson, + "File": File, + "RegexpInFile": RegexpInFile, + "Upper": Upper, + "Lower": Lower, + "IpInRange": IpInRange, + "TimeNow": TimeNow, + "ParseUri": ParseUri, + "PathUnescape": PathUnescape, + "QueryUnescape": QueryUnescape, + "PathEscape": PathEscape, + "QueryEscape": QueryEscape, + "XMLGetAttributeValue": XMLGetAttributeValue, + "XMLGetNodeValue": XMLGetNodeValue, + "IpToRange": IpToRange, + "IsIPV6": IsIPV6, + "LookupHost": LookupHost, + "GetDecisionsCount": GetDecisionsCount, + "GetDecisionsSinceCount": GetDecisionsSinceCount, + "Sprintf": fmt.Sprintf, + } + for k, v := range ctx { + ExprLib[k] = v + } + return ExprLib +} + +func Init(databaseClient *database.Client) error { + dataFile = make(map[string][]string) + dataFileRegex = make(map[string][]*regexp.Regexp) + dbClient = databaseClient + return nil +} + +func FileInit(fileFolder string, filename string, fileType string) error { + log.Debugf("init (folder:%s) (file:%s) (type:%s)", fileFolder, filename, fileType) + filepath := path.Join(fileFolder, filename) + file, err := os.Open(filepath) + if err != nil { + return err + } + defer file.Close() + + if fileType == "" { + log.Debugf("ignored file %s%s because no type specified", fileFolder, filename) + return nil + } + if _, ok := dataFile[filename]; !ok { + dataFile[filename] = []string{} + } + scanner := bufio.NewScanner(file) + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "#") { // allow comments + continue + } + if len(scanner.Text()) == 0 { //skip empty lines + continue + } + switch fileType { + case "regex", "regexp": + dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text())) + case "string": + dataFile[filename] = append(dataFile[filename], scanner.Text()) + default: + return fmt.Errorf("unknown data type '%s' for : '%s'", fileType, filename) + } + } + + if err := scanner.Err(); err != nil { + return err + } + return nil +} + +func QueryEscape(s string) string { + return url.QueryEscape(s) +} + +func PathEscape(s string) string { + return url.PathEscape(s) +} + +func PathUnescape(s string) string { + ret, err := url.PathUnescape(s) + if err != nil { + log.Debugf("unable to PathUnescape '%s': %+v", s, err) + return s + } + return ret +} + +func QueryUnescape(s string) string { + ret, err := url.QueryUnescape(s) + if err != nil { + log.Debugf("unable to QueryUnescape '%s': %+v", s, err) + return s + } + return ret +} + +func File(filename string) []string { + if _, ok := dataFile[filename]; ok { + return dataFile[filename] + } + log.Errorf("file '%s' (type:string) not found in expr library", filename) + log.Errorf("expr library : %s", spew.Sdump(dataFile)) + return []string{} +} + +func RegexpInFile(data string, filename string) bool { + if _, ok := dataFileRegex[filename]; ok { + for _, re := range dataFileRegex[filename] { + if re.Match([]byte(data)) { + return true + } + } + } else { + log.Errorf("file '%s' (type:regexp) not found in expr library", filename) + log.Errorf("expr library : %s", spew.Sdump(dataFileRegex)) + } + return false +} + +func IpInRange(ip string, ipRange string) bool { + var err error + var ipParsed net.IP + var ipRangeParsed *net.IPNet + + ipParsed = net.ParseIP(ip) + if ipParsed == nil { + log.Debugf("'%s' is not a valid IP", ip) + return false + } + if _, ipRangeParsed, err = net.ParseCIDR(ipRange); err != nil { + log.Debugf("'%s' is not a valid IP Range", ipRange) + return false + } + if ipRangeParsed.Contains(ipParsed) { + return true + } + return false +} + +func IsIPV6(ip 
string) bool { + ipParsed := net.ParseIP(ip) + if ipParsed == nil { + log.Debugf("'%s' is not a valid IP", ip) + return false + } + + // If it's a valid IP and can't be converted to IPv4 then it is an IPv6 + return ipParsed.To4() == nil +} + +func IpToRange(ip string, cidr string) string { + cidr = strings.TrimPrefix(cidr, "/") + mask, err := strconv.Atoi(cidr) + if err != nil { + log.Errorf("bad cidr '%s': %s", cidr, err) + return "" + } + + ipAddr := net.ParseIP(ip) + if ipAddr == nil { + log.Errorf("can't parse IP address '%s'", ip) + return "" + } + ipRange := iplib.NewNet(ipAddr, mask) + if ipRange.IP() == nil { + log.Errorf("can't get cidr '%s' of '%s'", cidr, ip) + return "" + } + return ipRange.String() +} + +func TimeNow() string { + return time.Now().UTC().Format(time.RFC3339) +} + +func ParseUri(uri string) map[string][]string { + ret := make(map[string][]string) + u, err := url.Parse(uri) + if err != nil { + log.Errorf("Could not parse URI: %s", err) + return ret + } + parsed, err := url.ParseQuery(u.RawQuery) + if err != nil { + log.Errorf("Could not parse query uri : %s", err) + return ret + } + for k, v := range parsed { + ret[k] = v + } + return ret +} + +func KeyExists(key string, dict map[string]interface{}) bool { + _, ok := dict[key] + return ok +} + +func GetDecisionsCount(value string) int { + if dbClient == nil { + log.Error("No database config to call GetDecisionsCount()") + return 0 + } + count, err := dbClient.CountDecisionsByValue(value) + if err != nil { + log.Errorf("Failed to get decisions count from value '%s'", value) + return 0 + } + return count +} + +func GetDecisionsSinceCount(value string, since string) int { + if dbClient == nil { + log.Error("No database config to call GetDecisionsCount()") + return 0 + } + sinceDuration, err := time.ParseDuration(since) + if err != nil { + log.Errorf("Failed to parse since parameter '%s' : %s", since, err) + return 0 + } + sinceTime := time.Now().UTC().Add(-sinceDuration) + count, err := dbClient.CountDecisionsSinceByValue(value, sinceTime) + if err != nil { + log.Errorf("Failed to get decisions count from value '%s'", value) + return 0 + } + return count +} + +func LookupHost(value string) []string { + addresses , err := net.LookupHost(value) + if err != nil { + log.Errorf("Failed to lookup host '%s' : %s", value, err) + return []string{} + } + return addresses +} diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go new file mode 100644 index 0000000..1bfbf77 --- /dev/null +++ b/pkg/exprhelpers/exprlib_test.go @@ -0,0 +1,973 @@ +package exprhelpers + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/pkg/errors" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + + "testing" + + "github.com/antonmedv/expr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + TestFolder = "tests" +) + +func getDBClient(t *testing.T) *database.Client { + t.Helper() + dbPath, err := os.CreateTemp("", "*sqlite") + if err != nil { + t.Fatal(err) + } + testDbClient, err := database.NewClient(&csconfig.DatabaseCfg{ + Type: "sqlite", + DbName: "crowdsec", + DbPath: dbPath.Name(), + }) + if err != nil { + t.Fatal(err) + } + return testDbClient +} + +func TestVisitor(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string 
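+		// a non-nil err marks filters that are expected to fail compilation;
+		// the loop below only fatals when err is nil.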
+ filter string + result bool + env map[string]interface{} + err error + }{ + { + name: "debug : no variable", + filter: "'crowdsec' startsWith 'crowdse'", + result: true, + err: nil, + env: map[string]interface{}{}, + }, + { + name: "debug : simple variable", + filter: "'crowdsec' startsWith static_one && 1 == 1", + result: true, + err: nil, + env: map[string]interface{}{"static_one": string("crowdse")}, + }, + { + name: "debug : simple variable re-used", + filter: "static_one.foo == 'bar' && static_one.foo != 'toto'", + result: true, + err: nil, + env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, + }, + { + name: "debug : can't compile", + filter: "static_one.foo.toto == 'lol'", + result: false, + err: fmt.Errorf("bad syntax"), + env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, + }, + { + name: "debug : can't compile #2", + filter: "static_one.f!oo.to/to == 'lol'", + result: false, + err: fmt.Errorf("bad syntax"), + env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, + }, + { + name: "debug : can't compile #3", + filter: "", + result: false, + err: fmt.Errorf("bad syntax"), + env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, + }, + } + + log.SetLevel(log.DebugLevel) + clog := log.WithFields(log.Fields{ + "type": "test", + }) + + for _, test := range tests { + compiledFilter, err := expr.Compile(test.filter, expr.Env(GetExprEnv(test.env))) + if err != nil && test.err == nil { + log.Fatalf("compile: %s", err) + } + debugFilter, err := NewDebugger(test.filter, expr.Env(GetExprEnv(test.env))) + if err != nil && test.err == nil { + log.Fatalf("debug: %s", err) + } + + if compiledFilter != nil { + result, err := expr.Run(compiledFilter, GetExprEnv(test.env)) + if err != nil && test.err == nil { + log.Fatalf("run : %s", err) + } + if isOk := assert.Equal(t, test.result, result); !isOk { + t.Fatalf("test '%s' : NOK", test.filter) + } + } + + if debugFilter != nil { + debugFilter.Run(clog, test.result, GetExprEnv(test.env)) + } + } +} + +func TestRegexpInFile(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data_re.txt", "regex") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + filter string + result bool + err error + }{ + { + name: "RegexpInFile() test: lower case word in data file", + filter: "RegexpInFile('crowdsec', 'test_data_re.txt')", + result: false, + err: nil, + }, + { + name: "RegexpInFile() test: Match exactly", + filter: "RegexpInFile('Crowdsec', 'test_data_re.txt')", + result: true, + err: nil, + }, + { + name: "RegexpInFile() test: match with word before", + filter: "RegexpInFile('test Crowdsec', 'test_data_re.txt')", + result: true, + err: nil, + }, + { + name: "RegexpInFile() test: match with word before and other case", + filter: "RegexpInFile('test CrowdSec', 'test_data_re.txt')", + result: true, + err: nil, + }, + } + + for _, test := range tests { + compiledFilter, err := expr.Compile(test.filter, expr.Env(GetExprEnv(map[string]interface{}{}))) + if err != nil { + log.Fatalf(err.Error()) + } + result, err := expr.Run(compiledFilter, GetExprEnv(map[string]interface{}{})) + if err != nil { + log.Fatalf(err.Error()) + } + if isOk := assert.Equal(t, test.result, result); !isOk { + t.Fatalf("test '%s' : NOK", test.name) + } + } +} + +func TestFileInit(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + 
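+		// result is the expected number of entries loaded from the file
+		// (only checked for the string and regex types).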
filename string + types string + result int + err error + }{ + { + name: "file with type:string", + filename: "test_data.txt", + types: "string", + result: 3, + }, + { + name: "file with type:string and empty lines + commentaries", + filename: "test_empty_line.txt", + types: "string", + result: 3, + }, + { + name: "file with type:re", + filename: "test_data_re.txt", + types: "regex", + result: 2, + }, + { + name: "file without type", + filename: "test_data_no_type.txt", + types: "", + }, + } + + for _, test := range tests { + err := FileInit(TestFolder, test.filename, test.types) + if err != nil { + log.Fatalf(err.Error()) + } + if test.types == "string" { + if _, ok := dataFile[test.filename]; !ok { + t.Fatalf("test '%s' : NOK", test.name) + } + if isOk := assert.Equal(t, test.result, len(dataFile[test.filename])); !isOk { + t.Fatalf("test '%s' : NOK", test.name) + } + } else if test.types == "regex" { + if _, ok := dataFileRegex[test.filename]; !ok { + t.Fatalf("test '%s' : NOK", test.name) + } + if isOk := assert.Equal(t, test.result, len(dataFileRegex[test.filename])); !isOk { + t.Fatalf("test '%s' : NOK", test.name) + } + } else { + if _, ok := dataFileRegex[test.filename]; ok { + t.Fatalf("test '%s' : NOK", test.name) + } + if _, ok := dataFile[test.filename]; ok { + t.Fatalf("test '%s' : NOK", test.name) + } + } + log.Printf("test '%s' : OK", test.name) + } +} + +func TestFile(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data.txt", "string") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + filter string + result bool + err error + }{ + { + name: "File() test: word in file", + filter: "'Crowdsec' in File('test_data.txt')", + result: true, + err: nil, + }, + { + name: "File() test: word in file but different case", + filter: "'CrowdSecurity' in File('test_data.txt')", + result: false, + err: nil, + }, + { + name: "File() test: word not in file", + filter: "'test' in File('test_data.txt')", + result: false, + err: nil, + }, + { + name: "File() test: filepath provided doesn't exist", + filter: "'test' in File('non_existing_data.txt')", + result: false, + err: nil, + }, + } + + for _, test := range tests { + compiledFilter, err := expr.Compile(test.filter, expr.Env(GetExprEnv(map[string]interface{}{}))) + if err != nil { + log.Fatalf(err.Error()) + } + result, err := expr.Run(compiledFilter, GetExprEnv(map[string]interface{}{})) + if err != nil { + log.Fatalf(err.Error()) + } + if isOk := assert.Equal(t, test.result, result); !isOk { + t.Fatalf("test '%s' : NOK", test.name) + } + log.Printf("test '%s' : OK", test.name) + + } +} + +func TestIpInRange(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result bool + err string + }{ + { + name: "IpInRange() test: basic test", + env: map[string]interface{}{ + "ip": "192.168.0.1", + "ipRange": "192.168.0.0/24", + "IpInRange": IpInRange, + }, + code: "IpInRange(ip, ipRange)", + result: true, + err: "", + }, + { + name: "IpInRange() test: malformed IP", + env: map[string]interface{}{ + "ip": "192.168.0", + "ipRange": "192.168.0.0/24", + "IpInRange": IpInRange, + }, + code: "IpInRange(ip, ipRange)", + result: false, + err: "", + }, + { + name: "IpInRange() test: malformed IP range", + env: map[string]interface{}{ + "ip": "192.168.0.0/255", + "ipRange": "192.168.0.0/24", + "IpInRange": IpInRange, + }, + code: "IpInRange(ip, ipRange)", + result: false, + err: "", + }, + } + + for _, test := 
range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } + +} + +func TestIpToRange(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "IpToRange() test: IPv4", + env: map[string]interface{}{ + "ip": "192.168.1.1", + "netmask": "16", + "IpToRange": IpToRange, + }, + code: "IpToRange(ip, netmask)", + result: "192.168.0.0/16", + err: "", + }, + { + name: "IpToRange() test: IPv6", + env: map[string]interface{}{ + "ip": "2001:db8::1", + "netmask": "/64", + "IpToRange": IpToRange, + }, + code: "IpToRange(ip, netmask)", + result: "2001:db8::/64", + err: "", + }, + { + name: "IpToRange() test: malformed netmask", + env: map[string]interface{}{ + "ip": "192.168.0.1", + "netmask": "test", + "IpToRange": IpToRange, + }, + code: "IpToRange(ip, netmask)", + result: "", + err: "", + }, + { + name: "IpToRange() test: malformed IP", + env: map[string]interface{}{ + "ip": "a.b.c.d", + "netmask": "24", + "IpToRange": IpToRange, + }, + code: "IpToRange(ip, netmask)", + result: "", + err: "", + }, + { + name: "IpToRange() test: too high netmask", + env: map[string]interface{}{ + "ip": "192.168.1.1", + "netmask": "35", + "IpToRange": IpToRange, + }, + code: "IpToRange(ip, netmask)", + result: "", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } + +} + +func TestAtof(t *testing.T) { + testFloat := "1.5" + expectedFloat := 1.5 + + if Atof(testFloat) != expectedFloat { + t.Fatalf("Atof should returned 1.5 as a float") + } + + log.Printf("test 'Atof()' : OK") + + //bad float + testFloat = "1aaa.5" + expectedFloat = 0.0 + + if Atof(testFloat) != expectedFloat { + t.Fatalf("Atof should returned a negative value (error) as a float got") + } + + log.Printf("test 'Atof()' : OK") +} + +func TestUpper(t *testing.T) { + testStr := "test" + expectedStr := "TEST" + + if Upper(testStr) != expectedStr { + t.Fatalf("Upper() should returned test in upper case") + } + + log.Printf("test 'Upper()' : OK") +} + +func TestTimeNow(t *testing.T) { + ti, err := time.Parse(time.RFC3339, TimeNow()) + if err != nil { + t.Fatalf("Error parsing the return value of TimeNow: %s", err) + } + + if -1*time.Until(ti) > time.Second { + t.Fatalf("TimeNow func should return time.Now().UTC()") + } + log.Printf("test 'TimeNow()' : OK") +} + +func TestParseUri(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result map[string][]string + err string + }{ + { + name: "ParseUri() test: basic test", + env: map[string]interface{}{ + "uri": "/foo?a=1&b=2", + "ParseUri": ParseUri, + }, + code: "ParseUri(uri)", + result: map[string][]string{"a": []string{"1"}, "b": []string{"2"}}, + err: "", + }, + { + name: "ParseUri() test: no param", + env: map[string]interface{}{ + "uri": "/foo", + "ParseUri": ParseUri, + }, + code: "ParseUri(uri)", + result: map[string][]string{}, + err: "", + }, + { + name: "ParseUri() test: extra question mark", + env: map[string]interface{}{ + "uri": "/foo?a=1&b=2?", + "ParseUri": ParseUri, + }, + code: "ParseUri(uri)", + result: map[string][]string{"a": 
[]string{"1"}, "b": []string{"2?"}}, + err: "", + }, + { + name: "ParseUri() test: weird params", + env: map[string]interface{}{ + "uri": "/foo?&?&&&&?=123", + "ParseUri": ParseUri, + }, + code: "ParseUri(uri)", + result: map[string][]string{"?": []string{"", "123"}}, + err: "", + }, + { + name: "ParseUri() test: bad encoding", + env: map[string]interface{}{ + "uri": "/foo?a=%%F", + "ParseUri": ParseUri, + }, + code: "ParseUri(uri)", + result: map[string][]string{}, + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestQueryEscape(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "QueryEscape() test: basic test", + env: map[string]interface{}{ + "uri": "/foo?a=1&b=2", + "QueryEscape": QueryEscape, + }, + code: "QueryEscape(uri)", + result: "%2Ffoo%3Fa%3D1%26b%3D2", + err: "", + }, + { + name: "QueryEscape() test: basic test", + env: map[string]interface{}{ + "uri": "/foo?a=1&&b=<>'\"", + "QueryEscape": QueryEscape, + }, + code: "QueryEscape(uri)", + result: "%2Ffoo%3Fa%3D1%26%26b%3D%3C%3E%27%22", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestPathEscape(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "PathEscape() test: basic test", + env: map[string]interface{}{ + "uri": "/foo?a=1&b=2", + "PathEscape": PathEscape, + }, + code: "PathEscape(uri)", + result: "%2Ffoo%3Fa=1&b=2", + err: "", + }, + { + name: "PathEscape() test: basic test with more special chars", + env: map[string]interface{}{ + "uri": "/foo?a=1&&b=<>'\"", + "PathEscape": PathEscape, + }, + code: "PathEscape(uri)", + result: "%2Ffoo%3Fa=1&&b=%3C%3E%27%22", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestPathUnescape(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "PathUnescape() test: basic test", + env: map[string]interface{}{ + "uri": "%2Ffoo%3Fa=1&b=%3C%3E%27%22", + "PathUnescape": PathUnescape, + }, + code: "PathUnescape(uri)", + result: "/foo?a=1&b=<>'\"", + err: "", + }, + { + name: "PathUnescape() test: basic test with more special chars", + env: map[string]interface{}{ + "uri": "/$%7Bjndi", + "PathUnescape": PathUnescape, + }, + code: "PathUnescape(uri)", + result: "/${jndi", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestQueryUnescape(t *testing.T) { + tests := []struct { + name string + env 
map[string]interface{} + code string + result string + err string + }{ + { + name: "QueryUnescape() test: basic test", + env: map[string]interface{}{ + "uri": "%2Ffoo%3Fa=1&b=%3C%3E%27%22", + "QueryUnescape": QueryUnescape, + }, + code: "QueryUnescape(uri)", + result: "/foo?a=1&b=<>'\"", + err: "", + }, + { + name: "QueryUnescape() test: basic test with more special chars", + env: map[string]interface{}{ + "uri": "/$%7Bjndi", + "QueryUnescape": QueryUnescape, + }, + code: "QueryUnescape(uri)", + result: "/${jndi", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestLower(t *testing.T) { + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "Lower() test: basic test", + env: map[string]interface{}{ + "name": "ABCDEFG", + "Lower": Lower, + }, + code: "Lower(name)", + result: "abcdefg", + err: "", + }, + { + name: "Lower() test: basic test with more special chars", + env: map[string]interface{}{ + "name": "AbcDefG!#", + "Lower": Lower, + }, + code: "Lower(name)", + result: "abcdefg!#", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(test.env)) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestGetDecisionsCount(t *testing.T) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). 
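+		// SaveX panics on failure instead of returning an error, so the nil
+		// check just below can never trigger (and assert.Error on a freshly
+		// built error is always satisfied).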
+ SaveX(context.Background()) + + if decision == nil { + assert.Error(t, errors.Errorf("Failed to create sample decision")) + } + + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "GetDecisionsCount() test: existing IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + "GetDecisionsCount": GetDecisionsCount, + "sprintf": fmt.Sprintf, + }, + code: "sprintf('%d', GetDecisionsCount(Alert.GetValue()))", + result: "1", + err: "", + }, + { + name: "GetDecisionsCount() test: unknown IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + "GetDecisionsCount": GetDecisionsCount, + "sprintf": fmt.Sprintf, + }, + code: "sprintf('%d', GetDecisionsCount(Alert.GetValue()))", + result: "0", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(GetExprEnv(test.env))) + require.NoError(t, err) + output, err := expr.Run(program, GetExprEnv(test.env)) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} +func TestGetDecisionsSinceCount(t *testing.T) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + if decision == nil { + assert.Error(t, errors.Errorf("Failed to create sample decision")) + } + decision2 := dbClient.Ent.Decision.Create(). + SetCreatedAt(time.Now().AddDate(0, 0, -1)). + SetUntil(time.Now().AddDate(0, 0, -1)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). 
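+		// decision2 is backdated by one day via SetCreatedAt/SetUntil, so only
+		// the 25h window in the first test case below should count it.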
+ SaveX(context.Background()) + if decision2 == nil { + assert.Error(t, errors.Errorf("Failed to create sample decision")) + } + + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "GetDecisionsSinceCount() test: existing IP count since more than 1 day", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + "GetDecisionsSinceCount": GetDecisionsSinceCount, + "sprintf": fmt.Sprintf, + }, + code: "sprintf('%d', GetDecisionsSinceCount(Alert.GetValue(), '25h'))", + result: "2", + err: "", + }, + { + name: "GetDecisionsSinceCount() test: existing IP count since more than 1 hour", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + "GetDecisionsSinceCount": GetDecisionsSinceCount, + "sprintf": fmt.Sprintf, + }, + code: "sprintf('%d', GetDecisionsSinceCount(Alert.GetValue(), '1h'))", + result: "1", + err: "", + }, + { + name: "GetDecisionsSinceCount() test: unknown IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + "GetDecisionsSinceCount": GetDecisionsSinceCount, + "sprintf": fmt.Sprintf, + }, + code: "sprintf('%d', GetDecisionsSinceCount(Alert.GetValue(), '1h'))", + result: "0", + err: "", + }, + } + + for _, test := range tests { + program, err := expr.Compile(test.code, expr.Env(GetExprEnv(test.env))) + require.NoError(t, err) + output, err := expr.Run(program, GetExprEnv(test.env)) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} diff --git a/pkg/exprhelpers/jsonextract.go b/pkg/exprhelpers/jsonextract.go new file mode 100644 index 0000000..61c9275 --- /dev/null +++ b/pkg/exprhelpers/jsonextract.go @@ -0,0 +1,138 @@ +package exprhelpers + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/buger/jsonparser" + + log "github.com/sirupsen/logrus" +) + +func JsonExtractLib(jsblob string, target ...string) string { + value, dataType, _, err := jsonparser.Get( + jsonparser.StringToBytes(jsblob), + target..., + ) + + if err != nil { + if err == jsonparser.KeyPathNotFoundError { + log.Debugf("%+v doesn't exist", target) + return "" + } + log.Errorf("jsonExtractLib : %+v : %s", target, err) + return "" + } + if dataType == jsonparser.NotExist { + log.Debugf("%+v doesn't exist", target) + return "" + } + strvalue := string(value) + return strvalue +} + +func JsonExtractUnescape(jsblob string, target ...string) string { + value, err := jsonparser.GetString( + jsonparser.StringToBytes(jsblob), + target..., + ) + + if err != nil { + if err == jsonparser.KeyPathNotFoundError { + log.Debugf("%+v doesn't exist", target) + return "" + } + log.Errorf("JsonExtractUnescape : %+v : %s", target, err) + return "" + } + log.Tracef("extract path %+v", target) + return value +} + +func JsonExtract(jsblob string, target string) string { + if !strings.HasPrefix(target, "[") { + target = strings.ReplaceAll(target, "[", ".[") + } + fullpath := strings.Split(target, ".") + + log.Tracef("extract path %+v", fullpath) + return JsonExtractLib(jsblob, fullpath...) 
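+	// For instance (illustrative values): JsonExtract(`{"a":{"b":"c"}}`, "a.b")
+	// returns "c", while a missing path such as "a.x" returns "".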
+} + +func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]byte, error) { + if !strings.HasPrefix(target, "[") { + target = strings.ReplaceAll(target, "[", ".[") + } + fullpath := strings.Split(target, ".") + + log.Tracef("extract path %+v", fullpath) + + value, dataType, _, err := jsonparser.Get( + jsonparser.StringToBytes(jsblob), + fullpath..., + ) + + if err != nil { + if err == jsonparser.KeyPathNotFoundError { + log.Debugf("Key %+v doesn't exist", target) + return nil, fmt.Errorf("key %s does not exist", target) + } + log.Errorf("jsonExtractType : %s : %s", target, err) + return nil, fmt.Errorf("jsonExtractType: %s : %w", target, err) + } + + if dataType != t { + log.Errorf("jsonExtractType : expected type %s for target %s but found %s", t, target, dataType.String()) + return nil, fmt.Errorf("jsonExtractType: expected type %s for target %s but found %s", t, target, dataType.String()) + } + + return value, nil +} + +func JsonExtractSlice(jsblob string, target string) []interface{} { + + value, err := jsonExtractType(jsblob, target, jsonparser.Array) + + if err != nil { + log.Errorf("JsonExtractSlice : %s", err) + return nil + } + + s := make([]interface{}, 0) + + err = json.Unmarshal(value, &s) + if err != nil { + log.Errorf("JsonExtractSlice: could not convert '%s' to slice: %s", value, err) + return nil + } + return s +} + +func JsonExtractObject(jsblob string, target string) map[string]interface{} { + + value, err := jsonExtractType(jsblob, target, jsonparser.Object) + + if err != nil { + log.Errorf("JsonExtractObject: %s", err) + return nil + } + + s := make(map[string]interface{}) + + err = json.Unmarshal(value, &s) + if err != nil { + log.Errorf("JsonExtractObject: could not convert '%s' to map[string]interface{}: %s", value, err) + return nil + } + return s +} + +func ToJson(obj interface{}) string { + b, err := json.Marshal(obj) + if err != nil { + log.Errorf("ToJson : %s", err) + return "" + } + return string(b) +} diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go new file mode 100644 index 0000000..ceb9119 --- /dev/null +++ b/pkg/exprhelpers/jsonextract_test.go @@ -0,0 +1,251 @@ +package exprhelpers + +import ( + "log" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJsonExtract(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data_re.txt", "regex") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + jsonBlob string + targetField string + expectResult string + }{ + { + name: "basic json extract", + jsonBlob: `{"test" : "1234"}`, + targetField: "test", + expectResult: "1234", + }, + { + name: "basic json extract with non existing field", + jsonBlob: `{"test" : "1234"}`, + targetField: "non_existing_field", + expectResult: "", + }, + { + name: "extract subfield", + jsonBlob: `{"test" : {"a": "b"}}`, + targetField: "test.a", + expectResult: "b", + }, + } + + for _, test := range tests { + result := JsonExtract(test.jsonBlob, test.targetField) + isOk := assert.Equal(t, test.expectResult, result) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + log.Printf("test '%s' : OK", test.name) + } + +} +func TestJsonExtractUnescape(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data_re.txt", "regex") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + jsonBlob string + targetField 
string + expectResult string + }{ + { + name: "basic json extract", + jsonBlob: `{"log" : "\"GET /JBNwtQ6i.blt HTTP/1.1\" 200 13 \"-\" \"Craftbot\""}`, + targetField: "log", + expectResult: "\"GET /JBNwtQ6i.blt HTTP/1.1\" 200 13 \"-\" \"Craftbot\"", + }, + { + name: "basic json extract with non existing field", + jsonBlob: `{"test" : "1234"}`, + targetField: "non_existing_field", + expectResult: "", + }, + } + + for _, test := range tests { + result := JsonExtractUnescape(test.jsonBlob, test.targetField) + isOk := assert.Equal(t, test.expectResult, result) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + log.Printf("test '%s' : OK", test.name) + } +} + +func TestJsonExtractSlice(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data_re.txt", "regex") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + jsonBlob string + targetField string + expectResult []interface{} + }{ + { + name: "try to extract a string as a slice", + jsonBlob: `{"test" : "1234"}`, + targetField: "test", + expectResult: nil, + }, + { + name: "basic json slice extract", + jsonBlob: `{"test" : ["1234"]}`, + targetField: "test", + expectResult: []interface{}{"1234"}, + }, + { + name: "extract with complex expression", + jsonBlob: `{"test": {"foo": [{"a":"b"}]}}`, + targetField: "test.foo", + expectResult: []interface{}{map[string]interface{}{"a": "b"}}, + }, + { + name: "extract non-existing key", + jsonBlob: `{"test: "11234"}`, + targetField: "foo", + expectResult: nil, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + result := JsonExtractSlice(test.jsonBlob, test.targetField) + assert.Equal(t, test.expectResult, result) + }) + } +} + +func TestJsonExtractObject(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + err := FileInit(TestFolder, "test_data_re.txt", "regex") + if err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + jsonBlob string + targetField string + expectResult map[string]interface{} + }{ + { + name: "try to extract a string as an object", + jsonBlob: `{"test" : "1234"}`, + targetField: "test", + expectResult: nil, + }, + { + name: "basic json object extract", + jsonBlob: `{"test" : {"1234": {"foo": "bar"}}}`, + targetField: "test", + expectResult: map[string]interface{}{"1234": map[string]interface{}{"foo": "bar"}}, + }, + { + name: "extract with complex expression", + jsonBlob: `{"test": {"foo": [{"a":"b"}]}}`, + targetField: "test.foo[0]", + expectResult: map[string]interface{}{"a": "b"}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + result := JsonExtractObject(test.jsonBlob, test.targetField) + assert.Equal(t, test.expectResult, result) + }) + } +} + +func TestToJson(t *testing.T) { + tests := []struct { + name string + obj interface{} + expectResult string + }{ + { + name: "convert int", + obj: 42, + expectResult: "42", + }, + { + name: "convert slice", + obj: []string{"foo", "bar"}, + expectResult: `["foo","bar"]`, + }, + { + name: "convert map", + obj: map[string]string{"foo": "bar"}, + expectResult: `{"foo":"bar"}`, + }, + { + name: "convert struct", + obj: struct{ Foo string }{"bar"}, + expectResult: `{"Foo":"bar"}`, + }, + { + name: "convert complex struct", + obj: struct { + Foo string + Bar struct { + Baz string + } + Bla []string + }{ + Foo: "bar", + Bar: struct { + Baz string + }{ + Baz: "baz", + }, + Bla: 
[]string{"foo", "bar"}, + }, + expectResult: `{"Foo":"bar","Bar":{"Baz":"baz"},"Bla":["foo","bar"]}`, + }, + { + name: "convert invalid type", + obj: func() {}, + expectResult: "", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + result := ToJson(test.obj) + assert.Equal(t, test.expectResult, result) + }) + } +} diff --git a/pkg/exprhelpers/tests/test_data.txt b/pkg/exprhelpers/tests/test_data.txt new file mode 100644 index 0000000..a80d621 --- /dev/null +++ b/pkg/exprhelpers/tests/test_data.txt @@ -0,0 +1,3 @@ +Crowdsec +Crowdsecurity +CrowdSec \ No newline at end of file diff --git a/pkg/exprhelpers/tests/test_data_no_type.txt b/pkg/exprhelpers/tests/test_data_no_type.txt new file mode 100644 index 0000000..a80d621 --- /dev/null +++ b/pkg/exprhelpers/tests/test_data_no_type.txt @@ -0,0 +1,3 @@ +Crowdsec +Crowdsecurity +CrowdSec \ No newline at end of file diff --git a/pkg/exprhelpers/tests/test_data_re.txt b/pkg/exprhelpers/tests/test_data_re.txt new file mode 100644 index 0000000..5b7eb9b --- /dev/null +++ b/pkg/exprhelpers/tests/test_data_re.txt @@ -0,0 +1,2 @@ +.*Crowdsec.* +.*Crowd[sS]ec.* \ No newline at end of file diff --git a/pkg/exprhelpers/tests/test_empty_line.txt b/pkg/exprhelpers/tests/test_empty_line.txt new file mode 100644 index 0000000..ca09bd0 --- /dev/null +++ b/pkg/exprhelpers/tests/test_empty_line.txt @@ -0,0 +1,12 @@ +foo + +#toto + + +bar + + + + + +baz diff --git a/pkg/exprhelpers/visitor.go b/pkg/exprhelpers/visitor.go new file mode 100644 index 0000000..7a65c06 --- /dev/null +++ b/pkg/exprhelpers/visitor.go @@ -0,0 +1,136 @@ +package exprhelpers + +import ( + "fmt" + "strconv" + "strings" + + "github.com/antonmedv/expr/parser" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/vm" +) + +/* +Visitor is used to reconstruct variables with its property called in an expr filter +Thus, we can debug expr filter by displaying all variables contents present in the filter +*/ +type visitor struct { + newVar bool + currentID string + properties []string + vars []string +} + +/* +Enter should be present for the interface but is never used +*/ +func (v *visitor) Enter(node *ast.Node) {} + +/* +Exit is called when running ast.Walk(node, visitor), each time a node exit. +So we have the node information and we can get the identifier (first level of the struct) +and its properties to reconstruct the complete variable. +*/ +func (v *visitor) Exit(node *ast.Node) { + if n, ok := (*node).(*ast.IdentifierNode); ok { + if !v.newVar { + v.newVar = true + v.currentID = n.Value + } else { + fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, ".")) + v.vars = append(v.vars, fullVar) + v.properties = []string{} + v.currentID = n.Value + } + } else if n, ok := (*node).(*ast.PropertyNode); ok { + v.properties = append(v.properties, n.Property) + } +} + +/* +Build reconstruct all the variables used in a filter (to display their content later). 
+*/ +func (v *visitor) Build(filter string, exprEnv expr.Option) (*ExprDebugger, error) { + var expressions []*expression + ret := &ExprDebugger{ + filter: filter, + } + if filter == "" { + log.Debugf("unable to create expr debugger with empty filter") + return &ExprDebugger{}, nil + } + v.newVar = false + tree, err := parser.Parse(filter) + if err != nil { + return nil, err + } + ast.Walk(&tree.Node, v) + if v.currentID != "" && len(v.properties) > 0 { // if its a variable with property (eg. evt.Line.Labels) + fullVar := fmt.Sprintf("%s.%s", v.currentID, strings.Join(v.properties, ".")) + v.vars = append(v.vars, fullVar) + } else if v.currentID != "" && len(v.properties) == 0 { // if it's a variable without property + fullVar := v.currentID + v.vars = append(v.vars, fullVar) + } else { + log.Debugf("no variable in filter : '%s'", filter) + } + v.properties = []string{} + v.currentID = "" + for _, variable := range v.vars { + debugFilter, err := expr.Compile(variable, exprEnv) + if err != nil { + return ret, fmt.Errorf("compilation of variable '%s' failed: %v", variable, err) + } + tmpExpression := &expression{ + variable, + debugFilter, + } + expressions = append(expressions, tmpExpression) + + } + ret.expression = expressions + return ret, nil +} + +// ExprDebugger contains the list of expression to be run when debugging an expression filter +type ExprDebugger struct { + filter string + expression []*expression +} + +// expression is the structure that represents the variable in string and compiled format +type expression struct { + Str string + Compiled *vm.Program +} + +/* +Run display the content of each variable of a filter by evaluating them with expr, +again the expr environment given in parameter +*/ +func (e *ExprDebugger) Run(logger *logrus.Entry, filterResult bool, exprEnv map[string]interface{}) { + if len(e.expression) == 0 { + logger.Tracef("no variable to eval for filter '%s'", e.filter) + return + } + logger.Debugf("eval(%s) = %s", e.filter, strings.ToUpper(strconv.FormatBool(filterResult))) + logger.Debugf("eval variables:") + for _, expression := range e.expression { + debug, err := expr.Run(expression.Compiled, exprEnv) + if err != nil { + logger.Errorf("unable to print debug expression for '%s': %s", expression.Str, err) + } + logger.Debugf(" %s = '%v'", expression.Str, debug) + } +} + +// NewDebugger is the exported function that build the debuggers expressions +func NewDebugger(filter string, exprEnv expr.Option) (*ExprDebugger, error) { + visitor := &visitor{} + exprDebugger, err := visitor.Build(filter, exprEnv) + return exprDebugger, err +} diff --git a/pkg/exprhelpers/xml.go b/pkg/exprhelpers/xml.go new file mode 100644 index 0000000..1d0d407 --- /dev/null +++ b/pkg/exprhelpers/xml.go @@ -0,0 +1,64 @@ +package exprhelpers + +import ( + "github.com/beevik/etree" + log "github.com/sirupsen/logrus" +) + +var pathCache = make(map[string]etree.Path) + +func XMLGetAttributeValue(xmlString string, path string, attributeName string) string { + + if _, ok := pathCache[path]; !ok { + compiledPath, err := etree.CompilePath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "" + } + pathCache[path] = compiledPath + } + + compiledPath := pathCache[path] + doc := etree.NewDocument() + err := doc.ReadFromString(xmlString) + if err != nil { + log.Tracef("Could not parse XML: %s", err) + return "" + } + elem := doc.FindElementPath(compiledPath) + if elem == nil { + log.Debugf("Could not find element %s", path) + return "" + } + attr := 
elem.SelectAttr(attributeName) + if attr == nil { + log.Debugf("Could not find attribute %s", attributeName) + return "" + } + return attr.Value +} + +func XMLGetNodeValue(xmlString string, path string) string { + if _, ok := pathCache[path]; !ok { + compiledPath, err := etree.CompilePath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "" + } + pathCache[path] = compiledPath + } + + compiledPath := pathCache[path] + doc := etree.NewDocument() + err := doc.ReadFromString(xmlString) + if err != nil { + log.Tracef("Could not parse XML: %s", err) + return "" + } + elem := doc.FindElementPath(compiledPath) + if elem == nil { + log.Debugf("Could not find element %s", path) + return "" + } + return elem.Text() +} diff --git a/pkg/exprhelpers/xml_test.go b/pkg/exprhelpers/xml_test.go new file mode 100644 index 0000000..a6fdae3 --- /dev/null +++ b/pkg/exprhelpers/xml_test.go @@ -0,0 +1,115 @@ +package exprhelpers + +import ( + "log" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestXMLGetAttributeValue(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + xmlString string + path string + attribute string + expectResult string + }{ + { + name: "XMLGetAttributeValue", + xmlString: ``, + path: "/root/child", + attribute: "attr", + expectResult: "value", + }, + { + name: "Non existing attribute for XMLGetAttributeValue", + xmlString: ``, + path: "/root/child", + attribute: "asdasd", + expectResult: "", + }, + { + name: "Non existing path for XMLGetAttributeValue", + xmlString: ``, + path: "/foo/bar", + attribute: "asdasd", + expectResult: "", + }, + { + name: "Invalid XML for XMLGetAttributeValue", + xmlString: `<`, + path: "/foo/bar", + attribute: "asdasd", + expectResult: "", + }, + { + name: "Invalid path for XMLGetAttributeValue", + xmlString: ``, + path: "/foo/bar[@", + attribute: "asdasd", + expectResult: "", + }, + } + + for _, test := range tests { + result := XMLGetAttributeValue(test.xmlString, test.path, test.attribute) + isOk := assert.Equal(t, test.expectResult, result) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + log.Printf("test '%s' : OK", test.name) + } + +} +func TestXMLGetNodeValue(t *testing.T) { + if err := Init(nil); err != nil { + log.Fatalf(err.Error()) + } + + tests := []struct { + name string + xmlString string + path string + expectResult string + }{ + { + name: "XMLGetNodeValue", + xmlString: `foobar`, + path: "/root/child", + expectResult: "foobar", + }, + { + name: "Non existing path for XMLGetNodeValue", + xmlString: `foobar`, + path: "/foo/bar", + expectResult: "", + }, + { + name: "Invalid XML for XMLGetNodeValue", + xmlString: `<`, + path: "/foo/bar", + expectResult: "", + }, + { + name: "Invalid path for XMLGetNodeValue", + xmlString: `foobar`, + path: "/foo/bar[@", + expectResult: "", + }, + } + + for _, test := range tests { + result := XMLGetNodeValue(test.xmlString, test.path) + isOk := assert.Equal(t, test.expectResult, result) + if !isOk { + t.Fatalf("test '%s' failed", test.name) + } + log.Printf("test '%s' : OK", test.name) + } + +} diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go new file mode 100644 index 0000000..eeff24b --- /dev/null +++ b/pkg/hubtest/coverage.go @@ -0,0 +1,177 @@ +package hubtest + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + log "github.com/sirupsen/logrus" +) + +type ParserCoverage struct { 
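+	// Parser is the hub item name (e.g. "crowdsecurity/sshd-logs"); TestsCount
+	// tallies the assert lines that reference it, and PresentIn records which
+	// parser.assert files mention it.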
+ Parser string + TestsCount int + PresentIn map[string]bool //poorman's set +} + +type ScenarioCoverage struct { + Scenario string + TestsCount int + PresentIn map[string]bool +} + +func (h *HubTest) GetParsersCoverage() ([]ParserCoverage, error) { + var coverage []ParserCoverage + if _, ok := h.HubIndex.Data[cwhub.PARSERS]; !ok { + return coverage, fmt.Errorf("no parsers in hub index") + } + //populate from hub, iterate in alphabetical order + var pkeys []string + for pname := range h.HubIndex.Data[cwhub.PARSERS] { + pkeys = append(pkeys, pname) + } + sort.Strings(pkeys) + for _, pname := range pkeys { + coverage = append(coverage, ParserCoverage{ + Parser: pname, + TestsCount: 0, + PresentIn: make(map[string]bool), + }) + } + + //parser the expressions a-la-oneagain + passerts, err := filepath.Glob(".tests/*/parser.assert") + if err != nil { + return coverage, fmt.Errorf("while find parser asserts : %s", err) + } + for _, assert := range passerts { + file, err := os.Open(assert) + if err != nil { + return coverage, fmt.Errorf("while reading %s : %s", assert, err) + } + scanner := bufio.NewScanner(file) + for scanner.Scan() { + assertLine := regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) + line := scanner.Text() + log.Debugf("assert line : %s", line) + match := assertLine.FindStringSubmatch(line) + if len(match) == 0 { + log.Debugf("%s doesn't match", line) + continue + } + sidx := assertLine.SubexpIndex("parser") + capturedParser := match[sidx] + for idx, pcover := range coverage { + if pcover.Parser == capturedParser { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + parserNameSplit := strings.Split(pcover.Parser, "/") + parserNameOnly := parserNameSplit[len(parserNameSplit)-1] + if parserNameOnly == capturedParser { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + capturedParserSplit := strings.Split(capturedParser, "/") + capturedParserName := capturedParserSplit[len(capturedParserSplit)-1] + if capturedParserName == parserNameOnly { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + if capturedParserName == parserNameOnly+"-logs" { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + } + } + file.Close() + } + return coverage, nil +} + +func (h *HubTest) GetScenariosCoverage() ([]ScenarioCoverage, error) { + var coverage []ScenarioCoverage + if _, ok := h.HubIndex.Data[cwhub.SCENARIOS]; !ok { + return coverage, fmt.Errorf("no scenarios in hub index") + } + //populate from hub, iterate in alphabetical order + var pkeys []string + for scenarioName := range h.HubIndex.Data[cwhub.SCENARIOS] { + pkeys = append(pkeys, scenarioName) + } + sort.Strings(pkeys) + for _, scenarioName := range pkeys { + coverage = append(coverage, ScenarioCoverage{ + Scenario: scenarioName, + TestsCount: 0, + PresentIn: make(map[string]bool), + }) + } + + //parser the expressions a-la-oneagain + passerts, err := filepath.Glob(".tests/*/scenario.assert") + if err != nil { + return coverage, fmt.Errorf("while find scenario asserts : %s", err) + } + for _, assert := range passerts { + file, err := os.Open(assert) + if err != nil { + return coverage, fmt.Errorf("while reading %s : %s", assert, err) + } + scanner := bufio.NewScanner(file) + for scanner.Scan() { + assertLine := regexp.MustCompile(`^results\[[0-9]+\].Overflow.Alert.GetScenario\(\) == "(?P[^"]+)"`) + line := scanner.Text() + log.Debugf("assert line : %s", line) + match := 
assertLine.FindStringSubmatch(line) + if len(match) == 0 { + log.Debugf("%s doesn't match", line) + continue + } + sidx := assertLine.SubexpIndex("scenario") + scanner_name := match[sidx] + for idx, pcover := range coverage { + if pcover.Scenario == scanner_name { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + scenarioNameSplit := strings.Split(pcover.Scenario, "/") + scenarioNameOnly := scenarioNameSplit[len(scenarioNameSplit)-1] + if scenarioNameOnly == scanner_name { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + fixedProbingWord := strings.ReplaceAll(pcover.Scenario, "probbing", "probing") + fixedProbingAssert := strings.ReplaceAll(scanner_name, "probbing", "probing") + if fixedProbingWord == fixedProbingAssert { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + if fmt.Sprintf("%s-detection", pcover.Scenario) == scanner_name { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + if fmt.Sprintf("%s-detection", fixedProbingWord) == fixedProbingAssert { + coverage[idx].TestsCount++ + coverage[idx].PresentIn[assert] = true + continue + } + } + } + file.Close() + } + return coverage, nil +} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go new file mode 100644 index 0000000..36415f7 --- /dev/null +++ b/pkg/hubtest/hubtest.go @@ -0,0 +1,113 @@ +package hubtest + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/pkg/errors" +) + +type HubTest struct { + CrowdSecPath string + CscliPath string + HubPath string + HubTestPath string + HubIndexFile string + TemplateConfigPath string + TemplateProfilePath string + TemplateSimulationPath string + HubIndex *HubIndex + Tests []*HubTestItem +} + +const ( + templateConfigFile = "template_config.yaml" + templateSimulationFile = "template_simulation.yaml" + templateProfileFile = "template_profiles.yaml" +) + +func NewHubTest(hubPath string, crowdsecPath string, cscliPath string) (HubTest, error) { + var err error + + hubPath, err = filepath.Abs(hubPath) + if err != nil { + return HubTest{}, fmt.Errorf("can't get absolute path of hub: %+v", err) + } + // we can't use hubtest without the hub + if _, err := os.Stat(hubPath); os.IsNotExist(err) { + return HubTest{}, fmt.Errorf("path to hub '%s' doesn't exist, can't run", hubPath) + } + HubTestPath := filepath.Join(hubPath, "./.tests/") + + // we can't use hubtest without crowdsec binary + if _, err := exec.LookPath(crowdsecPath); err != nil { + if _, err := os.Stat(crowdsecPath); os.IsNotExist(err) { + return HubTest{}, fmt.Errorf("path to crowdsec binary '%s' doesn't exist or is not in $PATH, can't run", crowdsecPath) + } + } + + // we can't use hubtest without cscli binary + if _, err := exec.LookPath(cscliPath); err != nil { + if _, err := os.Stat(cscliPath); os.IsNotExist(err) { + return HubTest{}, fmt.Errorf("path to cscli binary '%s' doesn't exist or is not in $PATH, can't run", cscliPath) + } + } + + hubIndexFile := filepath.Join(hubPath, ".index.json") + bidx, err := os.ReadFile(hubIndexFile) + if err != nil { + return HubTest{}, fmt.Errorf("unable to read index file: %s", err) + } + + // load hub index + hubIndex, err := cwhub.LoadPkgIndex(bidx) + if err != nil { + return HubTest{}, fmt.Errorf("unable to load hub index file: %s", err) + } + + templateConfigFilePath := filepath.Join(HubTestPath, templateConfigFile) + templateProfilePath := filepath.Join(HubTestPath, 
templateProfileFile) + templateSimulationPath := filepath.Join(HubTestPath, templateSimulationFile) + + return HubTest{ + CrowdSecPath: crowdsecPath, + CscliPath: cscliPath, + HubPath: hubPath, + HubTestPath: HubTestPath, + HubIndexFile: hubIndexFile, + TemplateConfigPath: templateConfigFilePath, + TemplateProfilePath: templateProfilePath, + TemplateSimulationPath: templateSimulationPath, + HubIndex: &HubIndex{Data: hubIndex}, + }, nil +} + +func (h *HubTest) LoadTestItem(name string) (*HubTestItem, error) { + HubTestItem := &HubTestItem{} + testItem, err := NewTest(name, h) + if err != nil { + return HubTestItem, err + } + h.Tests = append(h.Tests, testItem) + + return testItem, nil +} + +func (h *HubTest) LoadAllTests() error { + testsFolder, err := os.ReadDir(h.HubTestPath) + if err != nil { + return err + } + + for _, f := range testsFolder { + if f.IsDir() { + if _, err := h.LoadTestItem(f.Name()); err != nil { + return errors.Wrapf(err, "while loading %s", f.Name()) + } + } + } + return nil +} diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go new file mode 100644 index 0000000..c3e842b --- /dev/null +++ b/pkg/hubtest/hubtest_item.go @@ -0,0 +1,627 @@ +package hubtest + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type HubTestItemConfig struct { + Parsers []string `yaml:"parsers"` + Scenarios []string `yaml:"scenarios"` + PostOVerflows []string `yaml:"postoverflows"` + LogFile string `yaml:"log_file"` + LogType string `yaml:"log_type"` + Labels map[string]string `yaml:"labels"` + IgnoreParsers bool `yaml:"ignore_parsers"` // if we test a scenario, we don't want to assert on Parser + OverrideStatics []types.ExtraField `yaml:"override_statics"` //Allow to override statics. 
Executed before s00 +} + +type HubIndex struct { + Data map[string]map[string]cwhub.Item +} + +type HubTestItem struct { + Name string + Path string + + CrowdSecPath string + CscliPath string + + RuntimePath string + RuntimeHubPath string + RuntimeDataPath string + RuntimePatternsPath string + RuntimeConfigFilePath string + RuntimeProfileFilePath string + RuntimeSimulationFilePath string + RuntimeHubConfig *csconfig.Hub + + ResultsPath string + ParserResultFile string + ScenarioResultFile string + BucketPourResultFile string + + HubPath string + HubTestPath string + HubIndexFile string + TemplateConfigPath string + TemplateProfilePath string + TemplateSimulationPath string + HubIndex *HubIndex + + Config *HubTestItemConfig + + Success bool + ErrorsList []string + + AutoGen bool + ParserAssert *ParserAssert + ScenarioAssert *ScenarioAssert + + CustomItemsLocation []string +} + +const ( + ParserAssertFileName = "parser.assert" + ParserResultFileName = "parser-dump.yaml" + + ScenarioAssertFileName = "scenario.assert" + ScenarioResultFileName = "bucket-dump.yaml" + + BucketPourResultFileName = "bucketpour-dump.yaml" +) + +var crowdsecPatternsFolder = csconfig.DefaultConfigPath("patterns") + +func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { + testPath := filepath.Join(hubTest.HubTestPath, name) + runtimeFolder := filepath.Join(testPath, "runtime") + runtimeHubFolder := filepath.Join(runtimeFolder, "hub") + configFilePath := filepath.Join(testPath, "config.yaml") + resultPath := filepath.Join(testPath, "results") + + // read test configuration file + configFileData := &HubTestItemConfig{} + yamlFile, err := os.ReadFile(configFilePath) + if err != nil { + log.Printf("no config file found in '%s': %v", testPath, err) + } + err = yaml.Unmarshal(yamlFile, configFileData) + if err != nil { + return nil, fmt.Errorf("Unmarshal: %v", err) + } + + parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) + ParserAssert := NewParserAssert(parserAssertFilePath) + + scenarioAssertFilePath := filepath.Join(testPath, ScenarioAssertFileName) + ScenarioAssert := NewScenarioAssert(scenarioAssertFilePath) + return &HubTestItem{ + Name: name, + Path: testPath, + CrowdSecPath: hubTest.CrowdSecPath, + CscliPath: hubTest.CscliPath, + RuntimePath: filepath.Join(testPath, "runtime"), + RuntimeHubPath: runtimeHubFolder, + RuntimeDataPath: filepath.Join(runtimeFolder, "data"), + RuntimePatternsPath: filepath.Join(runtimeFolder, "patterns"), + RuntimeConfigFilePath: filepath.Join(runtimeFolder, "config.yaml"), + RuntimeProfileFilePath: filepath.Join(runtimeFolder, "profiles.yaml"), + RuntimeSimulationFilePath: filepath.Join(runtimeFolder, "simulation.yaml"), + ResultsPath: resultPath, + ParserResultFile: filepath.Join(resultPath, ParserResultFileName), + ScenarioResultFile: filepath.Join(resultPath, ScenarioResultFileName), + BucketPourResultFile: filepath.Join(resultPath, BucketPourResultFileName), + RuntimeHubConfig: &csconfig.Hub{ + HubDir: runtimeHubFolder, + ConfigDir: runtimeFolder, + HubIndexFile: hubTest.HubIndexFile, + DataDir: filepath.Join(runtimeFolder, "data"), + }, + Config: configFileData, + HubPath: hubTest.HubPath, + HubTestPath: hubTest.HubTestPath, + HubIndexFile: hubTest.HubIndexFile, + TemplateConfigPath: hubTest.TemplateConfigPath, + TemplateProfilePath: hubTest.TemplateProfilePath, + TemplateSimulationPath: hubTest.TemplateSimulationPath, + HubIndex: hubTest.HubIndex, + ScenarioAssert: ScenarioAssert, + ParserAssert: ParserAssert, + CustomItemsLocation: 
[]string{hubTest.HubPath, testPath}, + }, nil +} + +func (t *HubTestItem) InstallHub() error { + // install parsers in runtime environment + for _, parser := range t.Config.Parsers { + if parser == "" { + continue + } + var parserDirDest string + if hubParser, ok := t.HubIndex.Data[cwhub.PARSERS][parser]; ok { + parserSource, err := filepath.Abs(filepath.Join(t.HubPath, hubParser.RemotePath)) + if err != nil { + return fmt.Errorf("can't get absolute path of '%s': %s", parserSource, err) + } + parserFileName := filepath.Base(parserSource) + + // runtime/hub/parsers/s00-raw/crowdsecurity/ + hubDirParserDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubParser.RemotePath)) + + // runtime/parsers/s00-raw/ + parserDirDest = fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, hubParser.Stage) + + if err := os.MkdirAll(hubDirParserDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", hubDirParserDest, err) + } + if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) + } + + // runtime/hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml + hubDirParserPath := filepath.Join(hubDirParserDest, parserFileName) + if err := Copy(parserSource, hubDirParserPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %s", parserSource, hubDirParserPath, err) + } + + // runtime/parsers/s00-raw/syslog-logs.yaml + parserDirParserPath := filepath.Join(parserDirDest, parserFileName) + if err := os.Symlink(hubDirParserPath, parserDirParserPath); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("unable to symlink parser '%s' to '%s': %s", hubDirParserPath, parserDirParserPath, err) + } + } + } else { + customParserExist := false + for _, customPath := range t.CustomItemsLocation { + // we check if its a custom parser + customParserPath := filepath.Join(customPath, parser) + if _, err := os.Stat(customParserPath); os.IsNotExist(err) { + continue + //return fmt.Errorf("parser '%s' doesn't exist in the hub and doesn't appear to be a custom one.", parser) + } + + customParserPathSplit, customParserName := filepath.Split(customParserPath) + // because path is parsers///parser.yaml and we wan't the stage + splittedPath := strings.Split(customParserPathSplit, string(os.PathSeparator)) + customParserStage := splittedPath[len(splittedPath)-3] + + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("parsers/%s", customParserStage)) + + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + continue + //return fmt.Errorf("stage '%s' extracted from '%s' doesn't exist in the hub", customParserStage, hubStagePath) + } + + parserDirDest = fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, customParserStage) + if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { + continue + //return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) + } + + customParserDest := filepath.Join(parserDirDest, customParserName) + // if path to parser exist, copy it + if err := Copy(customParserPath, customParserDest); err != nil { + continue + //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customParserPath, customParserDest, err) + } + + customParserExist = true + break + } + if !customParserExist { + return fmt.Errorf("couldn't find custom parser '%s' in the following location: %+v", parser, t.CustomItemsLocation) + } + } + } + + // install scenarios in runtime environment + for _, scenario := range t.Config.Scenarios { + if scenario == "" { + 
continue + } + var scenarioDirDest string + if hubScenario, ok := t.HubIndex.Data[cwhub.SCENARIOS][scenario]; ok { + scenarioSource, err := filepath.Abs(filepath.Join(t.HubPath, hubScenario.RemotePath)) + if err != nil { + return fmt.Errorf("can't get absolute path to: %s", scenarioSource) + } + scenarioFileName := filepath.Base(scenarioSource) + + // runtime/hub/scenarios/crowdsecurity/ + hubDirScenarioDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubScenario.RemotePath)) + + // runtime/parsers/scenarios/ + scenarioDirDest = fmt.Sprintf("%s/scenarios/", t.RuntimePath) + + if err := os.MkdirAll(hubDirScenarioDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", hubDirScenarioDest, err) + } + if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + } + + // runtime/hub/scenarios/crowdsecurity/ssh-bf.yaml + hubDirScenarioPath := filepath.Join(hubDirScenarioDest, scenarioFileName) + if err := Copy(scenarioSource, hubDirScenarioPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %s", scenarioSource, hubDirScenarioPath, err) + } + + // runtime/scenarios/ssh-bf.yaml + scenarioDirParserPath := filepath.Join(scenarioDirDest, scenarioFileName) + if err := os.Symlink(hubDirScenarioPath, scenarioDirParserPath); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("unable to symlink scenario '%s' to '%s': %s", hubDirScenarioPath, scenarioDirParserPath, err) + } + } + } else { + customScenarioExist := false + for _, customPath := range t.CustomItemsLocation { + // we check if its a custom scenario + customScenarioPath := filepath.Join(customPath, scenario) + if _, err := os.Stat(customScenarioPath); os.IsNotExist(err) { + continue + //return fmt.Errorf("scenarios '%s' doesn't exist in the hub and doesn't appear to be a custom one.", scenario) + } + + scenarioDirDest = fmt.Sprintf("%s/scenarios/", t.RuntimePath) + if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + } + + scenarioFileName := filepath.Base(customScenarioPath) + scenarioFileDest := filepath.Join(scenarioDirDest, scenarioFileName) + if err := Copy(customScenarioPath, scenarioFileDest); err != nil { + continue + //return fmt.Errorf("unable to copy scenario from '%s' to '%s': %s", customScenarioPath, scenarioFileDest, err) + } + customScenarioExist = true + break + } + if !customScenarioExist { + return fmt.Errorf("couldn't find custom scenario '%s' in the following location: %+v", scenario, t.CustomItemsLocation) + } + } + } + + // install postoverflows in runtime environment + for _, postoverflow := range t.Config.PostOVerflows { + if postoverflow == "" { + continue + } + var postoverflowDirDest string + if hubPostOverflow, ok := t.HubIndex.Data[cwhub.PARSERS_OVFLW][postoverflow]; ok { + postoverflowSource, err := filepath.Abs(filepath.Join(t.HubPath, hubPostOverflow.RemotePath)) + if err != nil { + return fmt.Errorf("can't get absolute path of '%s': %s", postoverflowSource, err) + } + postoverflowFileName := filepath.Base(postoverflowSource) + + // runtime/hub/postoverflows/s00-enrich/crowdsecurity/ + hubDirPostoverflowDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubPostOverflow.RemotePath)) + + // runtime/postoverflows/s00-enrich + postoverflowDirDest = fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, hubPostOverflow.Stage) + + if err := os.MkdirAll(hubDirPostoverflowDest, os.ModePerm); 
err != nil { + return fmt.Errorf("unable to create folder '%s': %s", hubDirPostoverflowDest, err) + } + if err := os.MkdirAll(postoverflowDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + } + + // runtime/hub/postoverflows/s00-enrich/crowdsecurity/rdns.yaml + hubDirPostoverflowPath := filepath.Join(hubDirPostoverflowDest, postoverflowFileName) + if err := Copy(postoverflowSource, hubDirPostoverflowPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %s", postoverflowSource, hubDirPostoverflowPath, err) + } + + // runtime/postoverflows/s00-enrich/rdns.yaml + postoverflowDirParserPath := filepath.Join(postoverflowDirDest, postoverflowFileName) + if err := os.Symlink(hubDirPostoverflowPath, postoverflowDirParserPath); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("unable to symlink postoverflow '%s' to '%s': %s", hubDirPostoverflowPath, postoverflowDirParserPath, err) + } + } + } else { + customPostoverflowExist := false + for _, customPath := range t.CustomItemsLocation { + // we check if its a custom postoverflow + customPostOverflowPath := filepath.Join(customPath, postoverflow) + if _, err := os.Stat(customPostOverflowPath); os.IsNotExist(err) { + continue + //return fmt.Errorf("postoverflow '%s' doesn't exist in the hub and doesn't appear to be a custom one.", postoverflow) + } + + customPostOverflowPathSplit := strings.Split(customPostOverflowPath, "/") + customPostoverflowName := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-1] + // because path is postoverflows///parser.yaml and we wan't the stage + customPostoverflowStage := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-3] + + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("postoverflows/%s", customPostoverflowStage)) + + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + continue + //return fmt.Errorf("stage '%s' from extracted '%s' doesn't exist in the hub", customPostoverflowStage, hubStagePath) + } + + postoverflowDirDest = fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, customPostoverflowStage) + if err := os.MkdirAll(postoverflowDirDest, os.ModePerm); err != nil { + continue + //return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + } + + customPostoverflowDest := filepath.Join(postoverflowDirDest, customPostoverflowName) + // if path to postoverflow exist, copy it + if err := Copy(customPostOverflowPath, customPostoverflowDest); err != nil { + continue + //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customPostOverflowPath, customPostoverflowDest, err) + } + customPostoverflowExist = true + break + } + if !customPostoverflowExist { + return fmt.Errorf("couldn't find custom postoverflow '%s' in the following location: %+v", postoverflow, t.CustomItemsLocation) + } + } + } + + if len(t.Config.OverrideStatics) > 0 { + n := parser.Node{ + Name: "overrides", + Filter: "1==1", + Statics: t.Config.OverrideStatics, + } + b, err := yaml.Marshal(n) + if err != nil { + return fmt.Errorf("unable to marshal overrides: %s", err) + } + tgtFilename := fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) + if err := os.WriteFile(tgtFilename, b, os.ModePerm); err != nil { + return fmt.Errorf("unable to write overrides to '%s': %s", tgtFilename, err) + } + } + + // load installed hub + err := cwhub.GetHubIdx(t.RuntimeHubConfig) + if err != nil { + log.Fatalf("can't local sync the hub: %+v", err) + } + + // install 
data for parsers if needed + ret := cwhub.GetItemMap(cwhub.PARSERS) + for parserName, item := range ret { + if item.Installed { + if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) + } + log.Debugf("parser '%s' installed successfully in runtime environment", parserName) + } + } + + // install data for scenarios if needed + ret = cwhub.GetItemMap(cwhub.SCENARIOS) + for scenarioName, item := range ret { + if item.Installed { + if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) + } + log.Debugf("scenario '%s' installed successfully in runtime environment", scenarioName) + } + } + + // install data for postoverflows if needed + ret = cwhub.GetItemMap(cwhub.PARSERS_OVFLW) + for postoverflowName, item := range ret { + if item.Installed { + if err := cwhub.DownloadDataIfNeeded(t.RuntimeHubConfig, item, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) + } + log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName) + } + } + + return nil +} + +func (t *HubTestItem) Clean() error { + return os.RemoveAll(t.RuntimePath) +} + +func (t *HubTestItem) Run() error { + t.Success = false + t.ErrorsList = make([]string, 0) + + testPath := filepath.Join(t.HubTestPath, t.Name) + if _, err := os.Stat(testPath); os.IsNotExist(err) { + return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) + } + + currentDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("can't get current directory: %+v", err) + } + + // create runtime folder + if err := os.MkdirAll(t.RuntimePath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) + } + + // create runtime data folder + if err := os.MkdirAll(t.RuntimeDataPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeDataPath, err) + } + + // create runtime hub folder + if err := os.MkdirAll(t.RuntimeHubPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeHubPath, err) + } + + if err := Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { + return fmt.Errorf("unable to copy .index.json file in '%s': %s", filepath.Join(t.RuntimeHubPath, ".index.json"), err) + } + + // create results folder + if err := os.MkdirAll(t.ResultsPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", t.ResultsPath, err) + } + + // copy template config file to runtime folder + if err := Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) + } + + // copy template profile file to runtime folder + if err := Copy(t.TemplateProfilePath, t.RuntimeProfileFilePath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateProfilePath, t.RuntimeProfileFilePath, err) + } + + // copy template simulation file to runtime folder + if err := Copy(t.TemplateSimulationPath, t.RuntimeSimulationFilePath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) + } + + // copy template patterns folder to runtime folder + if err := 
CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %s", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + } + + // install the hub in the runtime folder + if err := t.InstallHub(); err != nil { + return fmt.Errorf("unable to install hub in '%s': %s", t.RuntimeHubPath, err) + } + + logFile := t.Config.LogFile + logType := t.Config.LogType + dsn := fmt.Sprintf("file://%s", logFile) + + if err := os.Chdir(testPath); err != nil { + return fmt.Errorf("can't 'cd' to '%s': %s", testPath, err) + } + + logFileStat, err := os.Stat(logFile) + if err != nil { + return fmt.Errorf("unable to stat log file '%s': %s", logFile, err) + } + if logFileStat.Size() == 0 { + return fmt.Errorf("Log file '%s' is empty, please fill it with log", logFile) + } + + cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--auto"} + cscliRegisterCmd := exec.Command(t.CscliPath, cmdArgs...) + log.Debugf("%s", cscliRegisterCmd.String()) + output, err := cscliRegisterCmd.CombinedOutput() + if err != nil { + if !strings.Contains(string(output), "unable to create machine: user 'testMachine': user already exist") { + fmt.Println(string(output)) + return fmt.Errorf("fail to run '%s' for test '%s': %v", cscliRegisterCmd.String(), t.Name, err) + } + } + + cmdArgs = []string{"-c", t.RuntimeConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", t.ResultsPath} + for labelKey, labelValue := range t.Config.Labels { + arg := fmt.Sprintf("%s:%s", labelKey, labelValue) + cmdArgs = append(cmdArgs, "-label", arg) + } + crowdsecCmd := exec.Command(t.CrowdSecPath, cmdArgs...) + log.Debugf("%s", crowdsecCmd.String()) + output, err = crowdsecCmd.CombinedOutput() + if log.GetLevel() >= log.DebugLevel || err != nil { + fmt.Println(string(output)) + } + if err != nil { + return fmt.Errorf("fail to run '%s' for test '%s': %v", crowdsecCmd.String(), t.Name, err) + } + + if err := os.Chdir(currentDir); err != nil { + return fmt.Errorf("can't 'cd' to '%s': %s", currentDir, err) + } + + // assert parsers + if !t.Config.IgnoreParsers { + _, err := os.Stat(t.ParserAssert.File) + if os.IsNotExist(err) { + parserAssertFile, err := os.Create(t.ParserAssert.File) + if err != nil { + log.Fatal(err) + } + parserAssertFile.Close() + } + assertFileStat, err := os.Stat(t.ParserAssert.File) + if err != nil { + return fmt.Errorf("error while stats '%s': %s", t.ParserAssert.File, err) + } + + if assertFileStat.Size() == 0 { + assertData, err := t.ParserAssert.AutoGenFromFile(t.ParserResultFile) + if err != nil { + return fmt.Errorf("couldn't generate assertion: %s", err) + } + t.ParserAssert.AutoGenAssertData = assertData + t.ParserAssert.AutoGenAssert = true + } else { + if err := t.ParserAssert.AssertFile(t.ParserResultFile); err != nil { + return fmt.Errorf("unable to run assertion on file '%s': %s", t.ParserResultFile, err) + } + } + } + + // assert scenarios + nbScenario := 0 + for _, scenario := range t.Config.Scenarios { + if scenario == "" { + continue + } + nbScenario += 1 + } + if nbScenario > 0 { + _, err := os.Stat(t.ScenarioAssert.File) + if os.IsNotExist(err) { + scenarioAssertFile, err := os.Create(t.ScenarioAssert.File) + if err != nil { + log.Fatal(err) + } + scenarioAssertFile.Close() + } + assertFileStat, err := os.Stat(t.ScenarioAssert.File) + if err != nil { + return fmt.Errorf("error while stats '%s': %s", t.ScenarioAssert.File, err) + } + + if assertFileStat.Size() == 0 { + assertData, err := 
t.ScenarioAssert.AutoGenFromFile(t.ScenarioResultFile)
+			if err != nil {
+				return fmt.Errorf("couldn't generate assertion: %s", err)
+			}
+			t.ScenarioAssert.AutoGenAssertData = assertData
+			t.ScenarioAssert.AutoGenAssert = true
+		} else {
+			if err := t.ScenarioAssert.AssertFile(t.ScenarioResultFile); err != nil {
+				return fmt.Errorf("unable to run assertion on file '%s': %s", t.ScenarioResultFile, err)
+			}
+		}
+	}
+
+	if t.ParserAssert.AutoGenAssert || t.ScenarioAssert.AutoGenAssert {
+		t.AutoGen = true
+	}
+
+	if (t.ParserAssert.Success || t.Config.IgnoreParsers) && (nbScenario == 0 || t.ScenarioAssert.Success) {
+		t.Success = true
+	}
+
+	return nil
+}
diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go
new file mode 100644
index 0000000..766f47e
--- /dev/null
+++ b/pkg/hubtest/parser_assert.go
@@ -0,0 +1,464 @@
+package hubtest
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/antonmedv/expr"
+	"github.com/antonmedv/expr/vm"
+	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"github.com/enescakir/emoji"
+	"github.com/fatih/color"
+	"github.com/pkg/errors"
+	diff "github.com/r3labs/diff/v2"
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/yaml.v2"
+)
+
+type AssertFail struct {
+	File       string
+	Line       int
+	Expression string
+	Debug      map[string]string
+}
+
+type ParserAssert struct {
+	File              string
+	AutoGenAssert     bool
+	AutoGenAssertData string
+	NbAssert          int
+	Fails             []AssertFail
+	Success           bool
+	TestData          *ParserResults
+}
+
+type ParserResult struct {
+	Evt     types.Event
+	Success bool
+}
+type ParserResults map[string]map[string][]ParserResult
+
+func NewParserAssert(file string) *ParserAssert {
+	ParserAssert := &ParserAssert{
+		File:          file,
+		NbAssert:      0,
+		Success:       false,
+		Fails:         make([]AssertFail, 0),
+		AutoGenAssert: false,
+		TestData:      &ParserResults{},
+	}
+	return ParserAssert
+}
+
+func (p *ParserAssert) AutoGenFromFile(filename string) (string, error) {
+	err := p.LoadTest(filename)
+	if err != nil {
+		return "", err
+	}
+	ret := p.AutoGenParserAssert()
+	return ret, nil
+}
+
+func (p *ParserAssert) LoadTest(filename string) error {
+	var err error
+	parserDump, err := LoadParserDump(filename)
+	if err != nil {
+		return fmt.Errorf("loading parser dump file '%s': %+v", filename, err)
+	}
+	p.TestData = parserDump
+	return nil
+}
+
+func (p *ParserAssert) AssertFile(testFile string) error {
+	file, err := os.Open(p.File)
+	if err != nil {
+		return fmt.Errorf("failed to open %s: %s", p.File, err)
+	}
+
+	if err := p.LoadTest(testFile); err != nil {
+		return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err)
+	}
+	scanner := bufio.NewScanner(file)
+	scanner.Split(bufio.ScanLines)
+	nbLine := 0
+	for scanner.Scan() {
+		nbLine += 1
+		if scanner.Text() == "" {
+			continue
+		}
+		ok, err := p.Run(scanner.Text())
+		if err != nil {
+			return fmt.Errorf("unable to run assert '%s': %+v", scanner.Text(), err)
+		}
+		p.NbAssert += 1
+		if !ok {
+			log.Debugf("%s is FALSE", scanner.Text())
+			//fmt.SPrintf(" %s '%s'\n", emoji.RedSquare, scanner.Text())
+			failedAssert := &AssertFail{
+				File:       p.File,
+				Line:       nbLine,
+				Expression: scanner.Text(),
+				Debug:      make(map[string]string),
+			}
+			variableRE := regexp.MustCompile(`(?P<variable>[^ =]+) == .*`)
+			match := variableRE.FindStringSubmatch(scanner.Text())
+			if len(match) == 0 {
+				log.Infof("Couldn't get variable of line '%s'", scanner.Text())
+				continue
+			}
+			variable := match[1]
+			result, err := p.EvalExpression(variable)
+			if err != nil {
+				log.Errorf("unable to
evaluate variable '%s': %s", variable, err) + continue + } + failedAssert.Debug[variable] = result + p.Fails = append(p.Fails, *failedAssert) + continue + } + //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + + } + file.Close() + if p.NbAssert == 0 { + assertData, err := p.AutoGenFromFile(testFile) + if err != nil { + return fmt.Errorf("couldn't generate assertion: %s", err) + } + p.AutoGenAssertData = assertData + p.AutoGenAssert = true + } + if len(p.Fails) == 0 { + p.Success = true + } + + return nil +} + +func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { + var err error + //debug doesn't make much sense with the ability to evaluate "on the fly" + //var debugFilter *exprhelpers.ExprDebugger + var runtimeFilter *vm.Program + var output interface{} + + env := map[string]interface{}{"results": *p.TestData} + + if runtimeFilter, err = expr.Compile(expression, expr.Env(exprhelpers.GetExprEnv(env))); err != nil { + return output, err + } + + //dump opcode in trace level + log.Tracef("%s", runtimeFilter.Disassemble()) + + output, err = expr.Run(runtimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"results": *p.TestData})) + if err != nil { + log.Warningf("running : %s", expression) + log.Warningf("runtime error : %s", err) + return output, errors.Wrapf(err, "while running expression %s", expression) + } + return output, nil +} + +func (p *ParserAssert) EvalExpression(expression string) (string, error) { + output, err := p.RunExpression(expression) + if err != nil { + return "", err + } + ret, err := yaml.Marshal(output) + if err != nil { + return "", err + } + return string(ret), nil +} + +func (p *ParserAssert) Run(assert string) (bool, error) { + output, err := p.RunExpression(assert) + if err != nil { + return false, err + } + switch out := output.(type) { + case bool: + return out, nil + default: + return false, fmt.Errorf("assertion '%s' is not a condition", assert) + } +} + +func Escape(val string) string { + val = strings.ReplaceAll(val, `\`, `\\`) + val = strings.ReplaceAll(val, `"`, `\"`) + return val +} + +func (p *ParserAssert) AutoGenParserAssert() string { + //attempt to autogen parser asserts + var ret string + + //sort map keys for consistent ordre + var stages []string + for stage := range *p.TestData { + stages = append(stages, stage) + } + sort.Strings(stages) + ret += fmt.Sprintf("len(results) == %d\n", len(*p.TestData)) + for _, stage := range stages { + parsers := (*p.TestData)[stage] + //sort map keys for consistent ordre + var pnames []string + for pname := range parsers { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + for _, parser := range pnames { + presults := parsers[parser] + ret += fmt.Sprintf(`len(results["%s"]["%s"]) == %d`+"\n", stage, parser, len(presults)) + for pidx, result := range presults { + ret += fmt.Sprintf(`results["%s"]["%s"][%d].Success == %t`+"\n", stage, parser, pidx, result.Success) + + if !result.Success { + continue + } + for pkey, pval := range result.Evt.Parsed { + if pval == "" { + continue + } + ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Parsed["%s"] == "%s"`+"\n", stage, parser, pidx, pkey, Escape(pval)) + } + for mkey, mval := range result.Evt.Meta { + if mval == "" { + continue + } + ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Meta["%s"] == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) + } + for ekey, eval := range result.Evt.Enriched { + if eval == "" { + continue + } + ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Enriched["%s"] == "%s"`+"\n", stage, 
parser, pidx, ekey, Escape(eval)) + } + } + } + } + return ret +} + +func LoadParserDump(filepath string) (*ParserResults, error) { + var pdump ParserResults + + dumpData, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer dumpData.Close() + + results, err := io.ReadAll(dumpData) + if err != nil { + return nil, err + } + + if err := yaml.Unmarshal(results, &pdump); err != nil { + return nil, err + } + return &pdump, nil +} + +type DumpOpts struct { + Details bool + SkipOk bool +} + +func DumpTree(parser_results ParserResults, bucket_pour BucketPourInfo, opts DumpOpts) { + //note : we can use line -> time as the unique identifier (of acquisition) + + state := make(map[time.Time]map[string]map[string]ParserResult) + assoc := make(map[time.Time]string, 0) + + for stage, parsers := range parser_results { + for parser, results := range parsers { + for _, parser_res := range results { + evt := parser_res.Evt + if _, ok := state[evt.Line.Time]; !ok { + state[evt.Line.Time] = make(map[string]map[string]ParserResult) + assoc[evt.Line.Time] = evt.Line.Raw + } + if _, ok := state[evt.Line.Time][stage]; !ok { + state[evt.Line.Time][stage] = make(map[string]ParserResult) + } + state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parser_res.Success} + } + + } + } + + for bname, evtlist := range bucket_pour { + for _, evt := range evtlist { + if evt.Line.Raw == "" { + continue + } + //it might be bucket overflow being reprocessed, skip this + if _, ok := state[evt.Line.Time]; !ok { + state[evt.Line.Time] = make(map[string]map[string]ParserResult) + assoc[evt.Line.Time] = evt.Line.Raw + } + //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + if _, ok := state[evt.Line.Time]["buckets"]; !ok { + state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + } + state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + } + } + yellow := color.New(color.FgYellow).SprintFunc() + red := color.New(color.FgRed).SprintFunc() + green := color.New(color.FgGreen).SprintFunc() + //get each line + for tstamp, rawstr := range assoc { + if opts.SkipOk { + if _, ok := state[tstamp]["buckets"]["OK"]; ok { + continue + } + } + fmt.Printf("line: %s\n", rawstr) + skeys := make([]string, 0, len(state[tstamp])) + for k := range state[tstamp] { + //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + if k == "buckets" { + continue + } + skeys = append(skeys, k) + } + sort.Strings(skeys) + //iterate stage + var prev_item types.Event + + for _, stage := range skeys { + parsers := state[tstamp][stage] + + sep := "├" + presep := "|" + + fmt.Printf("\t%s %s\n", sep, stage) + + pkeys := make([]string, 0, len(parsers)) + for k := range parsers { + pkeys = append(pkeys, k) + } + sort.Strings(pkeys) + + for idx, parser := range pkeys { + res := parsers[parser].Success + sep := "├" + if idx == len(pkeys)-1 { + sep = "└" + } + created := 0 + updated := 0 + deleted := 0 + whitelisted := false + changeStr := "" + detailsDisplay := "" + + if res { + if prev_item.Stage == "" { + changeStr = "first_parser" + } else { + changelog, _ := diff.Diff(prev_item, parsers[parser].Evt) + for _, change := range changelog { + switch change.Type { + case "create": + created++ + detailsDisplay += fmt.Sprintf("\t%s\t\t%s 
%s evt.%s : %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), green(change.To)) + case "update": + detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s : %s -> %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), change.From, yellow(change.To)) + if change.Path[0] == "Whitelisted" && change.To == true { + whitelisted = true + } + updated++ + case "delete": + deleted++ + detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s\n", presep, sep, change.Type, red(strings.Join(change.Path, "."))) + } + } + } + prev_item = parsers[parser].Evt + } + + if created > 0 { + changeStr += green(fmt.Sprintf("+%d", created)) + } + if updated > 0 { + if len(changeStr) > 0 { + changeStr += " " + } + changeStr += yellow(fmt.Sprintf("~%d", updated)) + } + if deleted > 0 { + if len(changeStr) > 0 { + changeStr += " " + } + changeStr += red(fmt.Sprintf("-%d", deleted)) + } + if whitelisted { + if len(changeStr) > 0 { + changeStr += " " + } + changeStr += red("[whitelisted]") + } + if changeStr == "" { + changeStr = yellow("unchanged") + } + if res { + fmt.Printf("\t%s\t%s %s %s (%s)\n", presep, sep, emoji.GreenCircle, parser, changeStr) + if opts.Details { + fmt.Print(detailsDisplay) + } + } else { + fmt.Printf("\t%s\t%s %s %s\n", presep, sep, emoji.RedCircle, parser) + + } + } + } + sep := "└" + if len(state[tstamp]["buckets"]) > 0 { + sep = "├" + } + //did the event enter the bucket pour phase ? + if _, ok := state[tstamp]["buckets"]["OK"]; ok { + fmt.Printf("\t%s-------- parser success %s\n", sep, emoji.GreenCircle) + } else { + fmt.Printf("\t%s-------- parser failure %s\n", sep, emoji.RedCircle) + } + //now print bucket info + if len(state[tstamp]["buckets"]) > 0 { + fmt.Printf("\t├ Scenarios\n") + } + bnames := make([]string, 0, len(state[tstamp]["buckets"])) + for k := range state[tstamp]["buckets"] { + //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + if k == "OK" { + continue + } + bnames = append(bnames, k) + } + sort.Strings(bnames) + for idx, bname := range bnames { + sep := "├" + if idx == len(bnames)-1 { + sep = "└" + } + fmt.Printf("\t\t%s %s %s\n", sep, emoji.GreenCircle, bname) + } + fmt.Println() + } +} diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go new file mode 100644 index 0000000..4b8d899 --- /dev/null +++ b/pkg/hubtest/scenario_assert.go @@ -0,0 +1,273 @@ +package hubtest + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type ScenarioAssert struct { + File string + AutoGenAssert bool + AutoGenAssertData string + NbAssert int + Fails []AssertFail + Success bool + TestData *BucketResults + PourData *BucketPourInfo +} + +type BucketResults []types.Event +type BucketPourInfo map[string][]types.Event + +func NewScenarioAssert(file string) *ScenarioAssert { + ScenarioAssert := &ScenarioAssert{ + File: file, + NbAssert: 0, + Success: false, + Fails: make([]AssertFail, 0), + AutoGenAssert: false, + TestData: &BucketResults{}, + PourData: &BucketPourInfo{}, + } + return ScenarioAssert +} + +func (s *ScenarioAssert) AutoGenFromFile(filename string) (string, error) { + err := s.LoadTest(filename, "") + if err != nil { + return 
"", err + } + ret := s.AutoGenScenarioAssert() + return ret, nil +} + +func (s *ScenarioAssert) LoadTest(filename string, bucketpour string) error { + var err error + bucketDump, err := LoadScenarioDump(filename) + if err != nil { + return fmt.Errorf("loading scenario dump file '%s': %+v", filename, err) + } + s.TestData = bucketDump + + if bucketpour != "" { + pourDump, err := LoadBucketPourDump(bucketpour) + if err != nil { + return fmt.Errorf("loading bucket pour dump file '%s': %+v", filename, err) + } + s.PourData = pourDump + } + return nil +} + +func (s *ScenarioAssert) AssertFile(testFile string) error { + file, err := os.Open(s.File) + + if err != nil { + return fmt.Errorf("failed to open") + } + + if err := s.LoadTest(testFile, ""); err != nil { + return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err) + } + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + nbLine := 0 + for scanner.Scan() { + nbLine += 1 + if scanner.Text() == "" { + continue + } + ok, err := s.Run(scanner.Text()) + if err != nil { + return fmt.Errorf("unable to run assert '%s': %+v", scanner.Text(), err) + } + s.NbAssert += 1 + if !ok { + log.Debugf("%s is FALSE", scanner.Text()) + failedAssert := &AssertFail{ + File: s.File, + Line: nbLine, + Expression: scanner.Text(), + Debug: make(map[string]string), + } + variableRE := regexp.MustCompile(`(?P[^ ]+) == .*`) + match := variableRE.FindStringSubmatch(scanner.Text()) + if len(match) == 0 { + log.Infof("Couldn't get variable of line '%s'", scanner.Text()) + continue + } + variable := match[1] + result, err := s.EvalExpression(variable) + if err != nil { + log.Errorf("unable to evaluate variable '%s': %s", variable, err) + continue + } + failedAssert.Debug[variable] = result + s.Fails = append(s.Fails, *failedAssert) + continue + } + //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + + } + file.Close() + if s.NbAssert == 0 { + assertData, err := s.AutoGenFromFile(testFile) + if err != nil { + return fmt.Errorf("couldn't generate assertion: %s", err) + } + s.AutoGenAssertData = assertData + s.AutoGenAssert = true + } + + if len(s.Fails) == 0 { + s.Success = true + } + + return nil +} + +func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { + var err error + //debug doesn't make much sense with the ability to evaluate "on the fly" + //var debugFilter *exprhelpers.ExprDebugger + var runtimeFilter *vm.Program + var output interface{} + + env := map[string]interface{}{"results": *s.TestData} + + if runtimeFilter, err = expr.Compile(expression, expr.Env(exprhelpers.GetExprEnv(env))); err != nil { + return output, err + } + // if debugFilter, err = exprhelpers.NewDebugger(assert, expr.Env(exprhelpers.GetExprEnv(env))); err != nil { + // log.Warningf("Failed building debugher for %s : %s", assert, err) + // } + + //dump opcode in trace level + log.Tracef("%s", runtimeFilter.Disassemble()) + + output, err = expr.Run(runtimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"results": *s.TestData})) + if err != nil { + log.Warningf("running : %s", expression) + log.Warningf("runtime error : %s", err) + return output, errors.Wrapf(err, "while running expression %s", expression) + } + return output, nil +} + +func (s *ScenarioAssert) EvalExpression(expression string) (string, error) { + output, err := s.RunExpression(expression) + if err != nil { + return "", err + } + ret, err := yaml.Marshal(output) + if err != nil { + return "", err + } + return string(ret), nil +} + +func (s 
*ScenarioAssert) Run(assert string) (bool, error) { + output, err := s.RunExpression(assert) + if err != nil { + return false, err + } + switch out := output.(type) { + case bool: + return out, nil + default: + return false, fmt.Errorf("assertion '%s' is not a condition", assert) + } +} + +func (s *ScenarioAssert) AutoGenScenarioAssert() string { + //attempt to autogen parser asserts + var ret string + ret += fmt.Sprintf(`len(results) == %d`+"\n", len(*s.TestData)) + for eventIndex, event := range *s.TestData { + for ipSrc, source := range event.Overflow.Sources { + ret += fmt.Sprintf(`"%s" in results[%d].Overflow.GetSources()`+"\n", ipSrc, eventIndex) + ret += fmt.Sprintf(`results[%d].Overflow.Sources["%s"].IP == "%s"`+"\n", eventIndex, ipSrc, source.IP) + ret += fmt.Sprintf(`results[%d].Overflow.Sources["%s"].Range == "%s"`+"\n", eventIndex, ipSrc, source.Range) + ret += fmt.Sprintf(`results[%d].Overflow.Sources["%s"].GetScope() == "%s"`+"\n", eventIndex, ipSrc, *source.Scope) + ret += fmt.Sprintf(`results[%d].Overflow.Sources["%s"].GetValue() == "%s"`+"\n", eventIndex, ipSrc, *source.Value) + } + for evtIndex, evt := range event.Overflow.Alert.Events { + for _, meta := range evt.Meta { + ret += fmt.Sprintf(`results[%d].Overflow.Alert.Events[%d].GetMeta("%s") == "%s"`+"\n", eventIndex, evtIndex, meta.Key, meta.Value) + } + } + ret += fmt.Sprintf(`results[%d].Overflow.Alert.GetScenario() == "%s"`+"\n", eventIndex, *event.Overflow.Alert.Scenario) + ret += fmt.Sprintf(`results[%d].Overflow.Alert.Remediation == %t`+"\n", eventIndex, event.Overflow.Alert.Remediation) + ret += fmt.Sprintf(`results[%d].Overflow.Alert.GetEventsCount() == %d`+"\n", eventIndex, *event.Overflow.Alert.EventsCount) + } + return ret +} + +func (b BucketResults) Len() int { + return len(b) +} + +func (b BucketResults) Less(i, j int) bool { + return b[i].Overflow.Alert.GetScenario()+strings.Join(b[i].Overflow.GetSources(), "@") > b[j].Overflow.Alert.GetScenario()+strings.Join(b[j].Overflow.GetSources(), "@") +} + +func (b BucketResults) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func LoadBucketPourDump(filepath string) (*BucketPourInfo, error) { + var bucketDump BucketPourInfo + + dumpData, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer dumpData.Close() + + results, err := io.ReadAll(dumpData) + if err != nil { + return nil, err + } + + if err := yaml.Unmarshal(results, &bucketDump); err != nil { + return nil, err + } + + return &bucketDump, nil +} + +func LoadScenarioDump(filepath string) (*BucketResults, error) { + var bucketDump BucketResults + + dumpData, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer dumpData.Close() + + results, err := io.ReadAll(dumpData) + if err != nil { + return nil, err + } + + if err := yaml.Unmarshal(results, &bucketDump); err != nil { + return nil, err + } + + sort.Sort(bucketDump) + + return &bucketDump, nil +} diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go new file mode 100644 index 0000000..73de351 --- /dev/null +++ b/pkg/hubtest/utils.go @@ -0,0 +1,107 @@ +package hubtest + +import ( + "fmt" + "os" + "path/filepath" +) + +func Copy(sourceFile string, destinationFile string) error { + input, err := os.ReadFile(sourceFile) + if err != nil { + return err + } + + err = os.WriteFile(destinationFile, input, 0644) + if err != nil { + return err + } + return nil +} + +// checkPathNotContained returns an error if 'subpath' is inside 'path' +func checkPathNotContained(path string, subpath string) error { + absPath, err := 
filepath.Abs(path)
+	if err != nil {
+		return err
+	}
+
+	absSubPath, err := filepath.Abs(subpath)
+	if err != nil {
+		return err
+	}
+
+	current := absSubPath
+	for {
+		if current == absPath {
+			return fmt.Errorf("cannot copy a folder onto itself")
+		}
+		up := filepath.Dir(current)
+		if current == up {
+			break
+		}
+		current = up
+	}
+	return nil
+}
+
+func CopyDir(src string, dest string) error {
+	err := checkPathNotContained(src, dest)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+
+	file, err := f.Stat()
+	if err != nil {
+		return err
+	}
+	if !file.IsDir() {
+		return fmt.Errorf("source %s is not a directory", file.Name())
+	}
+
+	err = os.MkdirAll(dest, 0755)
+	if err != nil {
+		return err
+	}
+
+	files, err := os.ReadDir(src)
+	if err != nil {
+		return err
+	}
+
+	for _, f := range files {
+		if f.IsDir() {
+			err = CopyDir(src+"/"+f.Name(), dest+"/"+f.Name())
+			if err != nil {
+				return err
+			}
+		} else {
+			content, err := os.ReadFile(src + "/" + f.Name())
+			if err != nil {
+				return err
+			}
+			err = os.WriteFile(dest+"/"+f.Name(), content, 0755)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/hubtest/utils_test.go b/pkg/hubtest/utils_test.go
new file mode 100644
index 0000000..de4f1aa
--- /dev/null
+++ b/pkg/hubtest/utils_test.go
@@ -0,0 +1,18 @@
+package hubtest
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCheckPathNotContained(t *testing.T) {
+	assert.Nil(t, checkPathNotContained("/foo", "/bar"))
+	assert.Nil(t, checkPathNotContained("/foo/bar", "/foo"))
+	assert.Nil(t, checkPathNotContained("/foo/bar", "/"))
+	assert.Nil(t, checkPathNotContained("/path/to/somewhere", "/path/to/somewhere-else"))
+	assert.Nil(t, checkPathNotContained("~/.local/path/to/somewhere", "~/.local/path/to/somewhere-else"))
+	assert.NotNil(t, checkPathNotContained("/foo", "/foo/bar"))
+	assert.NotNil(t, checkPathNotContained("/", "/foo"))
+	assert.NotNil(t, checkPathNotContained("/", "/foo/bar/baz"))
+}
diff --git a/pkg/leakybucket/README.md b/pkg/leakybucket/README.md
new file mode 100644
index 0000000..5254f33
--- /dev/null
+++ b/pkg/leakybucket/README.md
@@ -0,0 +1,142 @@
+# Leakybuckets
+
+## Bucket concepts
+
+Leakybuckets are used for decision making. Under certain conditions,
+enriched events are poured into these buckets. When a bucket is full,
+we raise a new event; after this event is raised, the bucket is
+destroyed. There are many types of buckets, and we welcome any new
+useful bucket design.
+
+Usually a bucket configuration generates the creation of many
+buckets. They are differentiated by a field called stackkey. When two
+events arrive with the same stackkey, they go into the same matching
+bucket.
+
+The very purpose of these buckets is to detect clients that exceed a
+certain rate of attempts to do something (ssh connection, http
+authentication failure, etc.). Thus, the most commonly used stackkey
+field is the source_ip.
+
+## Standard leaky buckets
+
+Default buckets have two main configuration options:
+ * capacity: the number of events (an integer) the bucket can hold.
+   When the capacity is reached and a new event is poured, a new
+   event is raised; we call this type of event an overflow.
+ * leakspeed: the duration needed for an event to leak. When an event
+   leaks, it disappears from the bucket.
+
+## Trigger
+
+A trigger is a special type of bucket with zero capacity. Thus, when
+an event is poured into a trigger, it always raises an overflow.
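+
+To make the capacity/leakspeed mechanics concrete, here is a minimal,
+self-contained sketch. It is an illustration only, not the actual
+implementation: it uses the stock golang.org/x/time/rate limiter,
+while the real buckets rely on a variant of that rate limiter shipped
+under pkg/time/rate (see bucket.go).
+
+```
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+func main() {
+	// "leaky" semantics: capacity 5, one event leaks every 10s,
+	// so a burst of 6 events inside that window overflows.
+	leaky := rate.NewLimiter(rate.Every(10*time.Second), 5)
+	for i := 1; i <= 6; i++ {
+		if !leaky.Allow() {
+			fmt.Printf("event %d: overflow\n", i)
+		}
+	}
+
+	// "trigger" semantics are the degenerate case with zero capacity:
+	// the very first event overflows.
+	trigger := rate.NewLimiter(rate.Every(time.Second), 0)
+	fmt.Println("trigger overflows immediately:", !trigger.Allow())
+}
+```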
+
+## Uniq
+
+A uniq bucket works like the standard leaky bucket, except for one
+thing: a filter returns a property for each event, and only one
+occurrence of this property is allowed in the bucket; hence the name
+uniq.
+
+## Counter
+
+A counter is a special type of bucket with an infinite capacity and an
+infinite leakspeed (it never overflows and never leaks). Nevertheless,
+an event is raised after a fixed duration. The option is called
+duration.
+
+## Available configuration options for buckets
+
+### Fields for standard buckets
+
+* type: mandatory field. Must be one of "leaky", "trigger", "uniq" or
+  "counter"
+* name: mandatory field, but the value is free-form. Nevertheless,
+  this value will tag the events raised by the bucket.
+* filter: mandatory field. It's an expression that decides whether an
+  event matches the bucket or not; it has to return a boolean. As the
+  filter implementation we use https://github.com/antonmedv/expr
+* capacity: [mandatory for now, shouldn't be mandatory in the final
+  version] the size of the bucket. When pouring into a bucket that
+  already holds capacity events, it overflows.
+* leakspeed: a time duration (has to be parseable by
+  https://golang.org/pkg/time/#ParseDuration). After each interval an
+  event is leaked from the bucket.
+* stackkey: mandatory field. This field is used to determine into
+  which bucket incoming events are poured. When an unknown stackkey
+  is seen in an event, a new bucket is created.
+* on_overflow: optional field that tells what to do when the bucket
+  returns the overflow event. As of today, the possibilities are
+  "ban,1h", "Reprocess" and "Delete".
+  Reprocess is used to send the raised event back into the event pool
+  to be matched against buckets.
+
+### Fields for special buckets
+
+#### Uniq
+
+Uniq has an extra field, uniq_filter, which also uses the expression
+implementation from https://github.com/antonmedv/expr. The filter
+must return a string. All strings returned by this filter in the same
+bucket have to be different; thus, if a string is seen twice, the
+event is dismissed.
+
+#### Trigger
+
+Capacity and leakspeed are not relevant for this kind of bucket.
+
+#### Counter
+
+It's a special kind of bucket that raises an event and is destroyed
+after a fixed duration. The configuration field used is duration, and
+it must be parseable by https://golang.org/pkg/time/#ParseDuration.
+Nevertheless, this kind of bucket is usually used with an infinite
+leakspeed and an infinite capacity [capacity set to -1 for now].
+
+
+## Examples
+
+```
+# ssh bruteforce
+- type: leaky
+  name: ssh_bruteforce
+  filter: "Meta.log_type == 'ssh_failed-auth'"
+  leakspeed: "10s"
+  capacity: 5
+  stackkey: "source_ip"
+  on_overflow: ban,1h
+
+# reporting of src_ip,dest_port seen
+- type: counter
+  name: counter
+  filter: "Meta.service == 'tcp' && Event.new_connection == 'true'"
+  distinct: "Meta.source_ip + ':' + Meta.dest_port"
+  duration: 5m
+  capacity: -1
+
+- type: trigger
+  name: "New connection"
+  filter: "Meta.service == 'tcp' && Event.new_connection == 'true'"
+  on_overflow: Reprocess
+```
+
+# Note on leakybuckets implementation
+
+[This is not dry enough to have many details here, but:]
+
+The bucket code is triggered by `InfiniBucketify` in main.go.
+There's one struct called buckets, which is for now a
+`map[string]interface{}` that holds all buckets. The key of this map
+is derived from the filter configured for the bucket and its
+stackkey. This may look complicated, but in fact it allows us to use
+a single struct. This is done in buckets.go.
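+
+As a hedged sketch of that idea (the helper name and the hashing
+scheme below are illustrative, not the actual crowdsec code), the key
+for the buckets map can be derived like this:
+
+```
+package main
+
+import (
+	"crypto/sha1"
+	"fmt"
+)
+
+// bucketKey is a hypothetical helper: it derives the map key of a
+// bucket instance from the bucket's filter and the event's stackkey
+// value, so two events carrying the same stackkey are poured into
+// the same bucket instance.
+func bucketKey(filter, stackkey string) string {
+	h := sha1.New()
+	h.Write([]byte(filter))
+	h.Write([]byte(stackkey))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func main() {
+	k1 := bucketKey("Meta.log_type == 'ssh_failed-auth'", "192.0.2.1")
+	k2 := bucketKey("Meta.log_type == 'ssh_failed-auth'", "192.0.2.1")
+	fmt.Println(k1 == k2) // true: same filter and stackkey, same bucket
+}
+```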
+
+On top of that, the implementation defines only the standard leaky
+bucket. A goroutine is launched for every bucket (bucket.go); this
+goroutine manages the life of the bucket.
+
+For special buckets, hooks are defined at initialization time in
+manager.go. Hooks are called when relevant by the bucket goroutine,
+when events are poured and/or when the bucket overflows.
\ No newline at end of file
diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go
new file mode 100644
index 0000000..dd46d11
--- /dev/null
+++ b/pkg/leakybucket/blackhole.go
@@ -0,0 +1,68 @@
+package leakybucket
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+)
+
+type HiddenKey struct {
+	key        string
+	expiration time.Time
+}
+
+type Blackhole struct {
+	duration   time.Duration
+	hiddenKeys []HiddenKey
+	DumbProcessor
+}
+
+func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) {
+	duration, err := time.ParseDuration(bucketFactory.Blackhole)
+	if err != nil {
+		bucketFactory.logger.Warning("Blackhole duration not valid")
+		return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole)
+	}
+	return &Blackhole{
+		duration:      duration,
+		hiddenKeys:    []HiddenKey{},
+		DumbProcessor: DumbProcessor{},
+	}, nil
+}
+
+func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) {
+	return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
+		blackholed := false
+		var tmp []HiddenKey
+		// search if we are blackholed and refresh the slice
+		for _, element := range bl.hiddenKeys {
+			if element.key == leaky.Mapkey {
+				if element.expiration.After(leaky.Ovflw_ts) {
+					leaky.logger.Debugf("Overflow discarded, still blackholed for %s", element.expiration.Sub(leaky.Ovflw_ts))
+					blackholed = true
+				}
+			}
+
+			if element.expiration.After(leaky.Ovflw_ts) {
+				tmp = append(tmp, element)
+			} else {
+				leaky.logger.Debugf("%s left blackhole %s ago", element.key, leaky.Ovflw_ts.Sub(element.expiration))
+			}
+		}
+		bl.hiddenKeys = tmp
+
+		if blackholed {
+			leaky.logger.Tracef("Event is blackholed (%s)", leaky.First_ts)
+			return types.RuntimeAlert{
+				Mapkey: leaky.Mapkey,
+			}, nil
+		}
+		bl.hiddenKeys = append(bl.hiddenKeys, HiddenKey{leaky.Mapkey, leaky.Ovflw_ts.Add(bl.duration)})
+		leaky.logger.Debugf("Adding overflow to blackhole (%s)", leaky.First_ts)
+		return alert, queue
+	}
+}
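+
+// To illustrate the behaviour above with simplified types (a hedged,
+// self-contained sketch, not used by the implementation): once a key
+// overflows, further overflows for the same key are discarded until
+// the expiration derived from the blackhole duration has passed.
+//
+//	t0 := time.Now()
+//	bl := &simpleBlackhole{duration: time.Minute, until: map[string]time.Time{}}
+//	bl.onOverflow("1.2.3.4", t0)                     // true: kept, window starts
+//	bl.onOverflow("1.2.3.4", t0.Add(10*time.Second)) // false: discarded
+//	bl.onOverflow("1.2.3.4", t0.Add(2*time.Minute))  // true: window expired
+type simpleBlackhole struct {
+	duration time.Duration
+	until    map[string]time.Time
+}
+
+func (b *simpleBlackhole) onOverflow(key string, now time.Time) bool {
+	if exp, ok := b.until[key]; ok && exp.After(now) {
+		return false // still blackholed: discard this overflow
+	}
+	b.until[key] = now.Add(b.duration) // keep it and open a new blackhole window
+	return true
+}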
diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go
new file mode 100644
index 0000000..7f2be17
--- /dev/null
+++ b/pkg/leakybucket/bucket.go
@@ -0,0 +1,366 @@
+package leakybucket
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	//"log"
+	"github.com/crowdsecurity/crowdsec/pkg/time/rate"
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	"gopkg.in/tomb.v2"
+
+	//rate "time/rate"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+	//"golang.org/x/time/rate"
+)
+
+const (
+	LIVE = iota
+	TIMEMACHINE
+)
+
+// Leaky represents one instance of a bucket
+type Leaky struct {
+	Name string
+	Mode int //LIVE or TIMEMACHINE
+	//the limiter is what holds the proper "leaky" aspect: it determines when/if we can pour objects
+	Limiter         rate.RateLimiter `json:"-"`
+	SerializedState rate.Lstate
+	//Queue is used to hold the cache of objects in the bucket; it is used to know how many objects we have in the buffer
+	Queue *Queue
+	//Leaky buckets are receiving messages through a chan
+	In chan *types.Event `json:"-"`
+	//Leaky buckets are pushing their overflows through a chan
+	Out chan *Queue `json:"-"`
+	// shared for all buckets (the idea is to kill this afterwards)
+	AllOut chan types.Event `json:"-"`
+	//max capacity (for burst)
+	Capacity int
+	//CacheSize is the number of elements that should be kept in memory (compared to capacity)
+	CacheSize int
+	//the unique identifier of the bucket (a hash)
+	Mapkey string
+	// chan for signaling
+	Signal          chan bool `json:"-"`
+	Suicide         chan bool `json:"-"`
+	Reprocess       bool
+	Simulated       bool
+	Uuid            string
+	First_ts        time.Time
+	Last_ts         time.Time
+	Ovflw_ts        time.Time
+	Total_count     int
+	Leakspeed       time.Duration
+	BucketConfig    *BucketFactory
+	Duration        time.Duration
+	Pour            func(*Leaky, types.Event) `json:"-"`
+	//Profiling when set to true enables profiling of the bucket
+	Profiling       bool
+	timedOverflow   bool
+	logger          *log.Entry
+	scopeType       types.ScopeType
+	hash            string
+	scenarioVersion string
+	tomb            *tomb.Tomb
+	wgPour          *sync.WaitGroup
+	wgDumpState     *sync.WaitGroup
+	mutex           *sync.Mutex //used only for TIMEMACHINE mode to allow garbage collection without races
+}
+
+var BucketsPour = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_poured_total",
+		Help: "Total events poured in buckets.",
+	},
+	[]string{"source", "type", "name"},
+)
+
+var BucketsOverflow = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_overflowed_total",
+		Help: "Total buckets overflowed.",
+	},
+	[]string{"name"},
+)
+
+var BucketsCanceled = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_canceled_total",
+		Help: "Total buckets canceled.",
+	},
+	[]string{"name"},
+)
+
+var BucketsUnderflow = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_underflowed_total",
+		Help: "Total buckets underflowed.",
+	},
+	[]string{"name"},
+)
+
+var BucketsInstantiation = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "cs_bucket_created_total",
+		Help: "Total buckets instantiated.",
+	},
+	[]string{"name"},
+)
+
+var BucketsCurrentCount = prometheus.NewGaugeVec(
+	prometheus.GaugeOpts{
+		Name: "cs_buckets",
+		Help: "Number of buckets that currently exist.",
+	},
+	[]string{"name"},
+)
+
+var LeakyRoutineCount int64
+
+// NewLeaky creates a new leaky bucket from a BucketFactory
+// Events created by the bucket (overflow, bucket empty) are sent to a chan defined by BucketFactory
+// The leaky bucket implementation is based on a rate limiter (see https://godoc.org/golang.org/x/time/rate)
+// There's a trick: an event is sent when the bucket gets empty, to allow its destruction
+func NewLeaky(bucketFactory BucketFactory) *Leaky {
+	bucketFactory.logger.Tracef("Instantiating live bucket %s", bucketFactory.Name)
+	return FromFactory(bucketFactory)
+}
+
+func FromFactory(bucketFactory BucketFactory) *Leaky {
+	var limiter rate.RateLimiter
+	//golang rate limiter; it's mainly intended for http rate limiting
+	Qsize := bucketFactory.Capacity
+	if bucketFactory.CacheSize > 0 {
+		//cache is smaller than actual capacity
+		if bucketFactory.CacheSize <= bucketFactory.Capacity {
+			Qsize = bucketFactory.CacheSize
+			//bucket might be a counter (infinite size), allow cache limitation
+		} else if bucketFactory.Capacity == -1 {
+			Qsize = bucketFactory.CacheSize
+		}
+	}
+	if bucketFactory.Capacity == -1 {
+		//In this case we allow all events to pass.
+		//maybe in the future we could avoid using a limiter
+		limiter = &rate.AlwaysFull{}
+	} else {
+		limiter = rate.NewLimiter(rate.Every(bucketFactory.leakspeed), bucketFactory.Capacity)
+	}
+	BucketsInstantiation.With(prometheus.Labels{"name": bucketFactory.Name}).Inc()
+
+	//create the leaky bucket per se
+	l := &Leaky{
+		Name:            bucketFactory.Name,
+		Limiter:         limiter,
+		Uuid:            seed.Generate(),
+		Queue:           NewQueue(Qsize),
+		CacheSize:       bucketFactory.CacheSize,
+		Out:             make(chan *Queue, 1),
+		Suicide:         make(chan bool, 1),
+		AllOut:          bucketFactory.ret,
+		Capacity:        bucketFactory.Capacity,
+		Leakspeed:       bucketFactory.leakspeed,
+		BucketConfig:    &bucketFactory,
+		Pour:            Pour,
+		Reprocess:       bucketFactory.Reprocess,
+		Profiling:       bucketFactory.Profiling,
+		Mode:            LIVE,
+		scopeType:       bucketFactory.ScopeType,
+		scenarioVersion: bucketFactory.ScenarioVersion,
+		hash:            bucketFactory.hash,
+		Simulated:       bucketFactory.Simulated,
+		tomb:            bucketFactory.tomb,
+		wgPour:          bucketFactory.wgPour,
+		wgDumpState:     bucketFactory.wgDumpState,
+		mutex:           &sync.Mutex{},
+	}
+	if l.BucketConfig.Capacity > 0 && l.BucketConfig.leakspeed != time.Duration(0) {
+		l.Duration = time.Duration(l.BucketConfig.Capacity+1) * l.BucketConfig.leakspeed
+	}
+	if l.BucketConfig.duration != time.Duration(0) {
+		l.Duration = l.BucketConfig.duration
+		l.timedOverflow = true
+	}
+
+	return l
+}
+
+/* for now mimic a leak routine */
+//LeakRoutine is the life of a bucket. It dies when the bucket underflows or overflows
+func LeakRoutine(leaky *Leaky) error {
+
+	var (
+		durationTickerChan <-chan time.Time = make(<-chan time.Time)
+		durationTicker     *time.Ticker
+		firstEvent         bool = true
+	)
+
+	defer types.CatchPanic(fmt.Sprintf("crowdsec/LeakRoutine/%s", leaky.Name))
+
+	BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Inc()
+	defer BucketsCurrentCount.With(prometheus.Labels{"name": leaky.Name}).Dec()
+
+	/*todo : we create a logger at runtime while we want leakroutine to be up asap, might not be a good idea*/
+	leaky.logger = leaky.BucketConfig.logger.WithFields(log.Fields{"capacity": leaky.Capacity, "partition": leaky.Mapkey, "bucket_id": leaky.Uuid})
+
+	leaky.Signal <- true
+	atomic.AddInt64(&LeakyRoutineCount, 1)
+	defer atomic.AddInt64(&LeakyRoutineCount, -1)
+
+	for _, f := range leaky.BucketConfig.processors {
+		err := f.OnBucketInit(leaky.BucketConfig)
+		if err != nil {
+			leaky.logger.Errorf("Problem at bucket initialization. Bail out %T : %v", f, err)
+			close(leaky.Signal)
+			return fmt.Errorf("problem at bucket initialization. Bail out %T : %v", f, err)
+		}
+	}
+
+	leaky.logger.Debugf("Leaky routine starting, lifetime : %s", leaky.Duration)
+	for {
+		select {
+		/*receiving an event*/
+		case msg := <-leaky.In:
+			/*the msg var use is confusing and is redeclared in a different type :/*/
+			for _, processor := range leaky.BucketConfig.processors {
+				msg = processor.OnBucketPour(leaky.BucketConfig)(*msg, leaky)
+				// if msg is nil, the event was discarded by a processor : stop processing
+				if msg == nil {
+					goto End
+				}
+			}
+			if leaky.logger.Level >= log.TraceLevel {
+				leaky.logger.Tracef("Pour event: %s", spew.Sdump(msg))
+			}
+			BucketsPour.With(prometheus.Labels{"name": leaky.Name, "source": msg.Line.Src, "type": msg.Line.Module}).Inc()
+
+			leaky.Pour(leaky, *msg) // glue for now
+			//Clear cache on behalf of pour
+
+			// if durationTicker isn't initialized, then we're pouring our first event
+
+			// reinitialize the durationTicker when it's not a counter bucket
+			if !leaky.timedOverflow || firstEvent {
+				if firstEvent {
+					durationTicker = time.NewTicker(leaky.Duration)
+					durationTickerChan = durationTicker.C
+					defer durationTicker.Stop()
+				} else {
+					durationTicker.Reset(leaky.Duration)
+				}
+			}
+			firstEvent = false
+		/*we overflowed*/
+		case ofw := <-leaky.Out:
+			leaky.overflow(ofw)
+			return nil
+		/*suiciiiide*/
+		case <-leaky.Suicide:
+			close(leaky.Signal)
+			BucketsCanceled.With(prometheus.Labels{"name": leaky.Name}).Inc()
+			leaky.logger.Debugf("Suicide triggered")
+			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
+			leaky.logger.Tracef("Returning from leaky routine.")
+			return nil
+		/*we underflow or reach bucket deadline (timers)*/
+		case <-durationTickerChan:
+			var (
+				alert types.RuntimeAlert
+				err   error
+			)
+			leaky.Ovflw_ts = time.Now().UTC()
+			close(leaky.Signal)
+			ofw := leaky.Queue
+			alert = types.RuntimeAlert{Mapkey: leaky.Mapkey}
+
+			if leaky.timedOverflow {
+				BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc()
+
+				alert, err = NewAlert(leaky, ofw)
+				if err != nil {
+					log.Errorf("%s", err)
+				}
+				for _, f := range leaky.BucketConfig.processors {
+					alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw)
+					if ofw == nil {
+						leaky.logger.Debugf("Overflow has been discarded (%T)", f)
+						break
+					}
+				}
+				leaky.logger.Infof("Timed Overflow")
+			} else {
+				leaky.logger.Debugf("bucket underflow, destroy")
+				BucketsUnderflow.With(prometheus.Labels{"name": leaky.Name}).Inc()
+
+			}
+			if leaky.logger.Level >= log.TraceLevel {
+				/*don't sdump if it's not going to be printed, it's expensive*/
+				leaky.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: alert}))
+			}
+
+			leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW}
+			leaky.logger.Tracef("Returning from leaky routine.")
+			return nil
+		case <-leaky.tomb.Dying():
+			leaky.logger.Debugf("Bucket externally killed, return")
+			for len(leaky.Out) > 0 {
+				ofw := <-leaky.Out
+				leaky.overflow(ofw)
+			}
+			leaky.AllOut <- types.Event{Type: types.OVFLW, Overflow: types.RuntimeAlert{Mapkey: leaky.Mapkey}}
+			return nil
+
+		}
+	End:
+	}
+}
+
+func Pour(leaky *Leaky, msg types.Event) {
+	leaky.wgDumpState.Wait()
+	leaky.wgPour.Add(1)
+	defer leaky.wgPour.Done()
+
+	leaky.Total_count += 1
+	if leaky.First_ts.IsZero() {
+		leaky.First_ts = time.Now().UTC()
+	}
+	leaky.Last_ts = time.Now().UTC()
+	if leaky.Limiter.Allow() {
+		leaky.Queue.Add(msg)
+	} else {
+		leaky.Ovflw_ts = time.Now().UTC()
+		leaky.logger.Debugf("Last event to be poured, bucket overflow.")
+		leaky.Queue.Add(msg)
+		leaky.Out <- leaky.Queue
+	}
+}
+
+func (leaky
*Leaky) overflow(ofw *Queue) { + close(leaky.Signal) + alert, err := NewAlert(leaky, ofw) + if err != nil { + log.Errorf("%s", err) + } + leaky.logger.Tracef("Overflow hooks time : %v", leaky.BucketConfig.processors) + for _, f := range leaky.BucketConfig.processors { + alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) + if ofw == nil { + leaky.logger.Debugf("Overflow has been discarded (%T)", f) + break + } + } + if leaky.logger.Level >= log.TraceLevel { + leaky.logger.Tracef("Overflow event: %s", spew.Sdump(alert)) + } + mt, _ := leaky.Ovflw_ts.MarshalText() + leaky.logger.Tracef("overflow time : %s", mt) + + BucketsOverflow.With(prometheus.Labels{"name": leaky.Name}).Inc() + + leaky.AllOut <- types.Event{Overflow: alert, Type: types.OVFLW, MarshaledTime: string(mt)} +} diff --git a/pkg/leakybucket/buckets.go b/pkg/leakybucket/buckets.go new file mode 100644 index 0000000..cfe8d7c --- /dev/null +++ b/pkg/leakybucket/buckets.go @@ -0,0 +1,29 @@ +package leakybucket + +import ( + "crypto/sha1" + "fmt" + "sync" +) + +// Buckets is the struct used to hold buckets in the context of +// main.go the idea is to have one struct to rule them all +type Buckets struct { + wgDumpState *sync.WaitGroup + wgPour *sync.WaitGroup + Bucket_map *sync.Map +} + +// NewBuckets create the Buckets struct +func NewBuckets() *Buckets { + return &Buckets{ + wgDumpState: &sync.WaitGroup{}, + wgPour: &sync.WaitGroup{}, + Bucket_map: &sync.Map{}, + } +} + +func GetKey(bucketCfg BucketFactory, stackkey string) string { + return fmt.Sprintf("%x", sha1.Sum([]byte(bucketCfg.Filter+stackkey+bucketCfg.Name))) + +} diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go new file mode 100644 index 0000000..c6427fb --- /dev/null +++ b/pkg/leakybucket/buckets_test.go @@ -0,0 +1,309 @@ +package leakybucket + +import ( + "bytes" + "encoding/json" + "fmt" + "html/template" + "io" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/parser" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + yaml "gopkg.in/yaml.v2" +) + +type TestFile struct { + Lines []types.Event `yaml:"lines,omitempty"` + Results []types.Event `yaml:"results,omitempty"` +} + +func TestBucket(t *testing.T) { + var ( + envSetting = os.Getenv("TEST_ONLY") + tomb *tomb.Tomb = &tomb.Tomb{} + ) + err := exprhelpers.Init(nil) + if err != nil { + log.Fatalf("exprhelpers init failed: %s", err) + } + + if envSetting != "" { + if err := testOneBucket(t, envSetting, tomb); err != nil { + t.Fatalf("Test '%s' failed : %s", envSetting, err) + } + } else { + wg := new(sync.WaitGroup) + fds, err := os.ReadDir("./tests/") + if err != nil { + t.Fatalf("Unable to read test directory : %s", err) + } + for _, fd := range fds { + fname := "./tests/" + fd.Name() + log.Infof("Running test on %s", fname) + tomb.Go(func() error { + wg.Add(1) + defer wg.Done() + if err := testOneBucket(t, fname, tomb); err != nil { + t.Fatalf("Test '%s' failed : %s", fname, err) + } + return nil + }) + } + wg.Wait() + } +} + +//during tests, we're likely to have only one scenario, and thus only one holder. +//we want to avoid the death of the tomb because all existing buckets have been destroyed. 
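+//As an illustration: with gopkg.in/tomb.v2, the tomb dies on its own
+//once every goroutine registered through tomb.Go has returned, and
+//further tomb.Go calls then panic. A sketch of an equivalent guard,
+//blocking instead of polling (hypothetical helper, not used by these
+//tests):
+//
+//	func keepTombAlive(t *tomb.Tomb) {
+//		t.Go(func() error {
+//			<-t.Dying() // only released by an explicit Kill()
+//			return nil
+//		})
+//	}
+//
+//watchTomb below achieves the same by polling Alive() every 100ms.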
+func watchTomb(tomb *tomb.Tomb) { + for { + if tomb.Alive() == false { + log.Warning("Tomb is dead") + break + } + time.Sleep(100 * time.Millisecond) + } +} + +func testOneBucket(t *testing.T, dir string, tomb *tomb.Tomb) error { + + var ( + holders []BucketFactory + + stagefiles []byte + stagecfg string + stages []parser.Stagefile + err error + buckets *Buckets + ) + buckets = NewBuckets() + + /*load the scenarios*/ + stagecfg = dir + "/scenarios.yaml" + if stagefiles, err = os.ReadFile(stagecfg); err != nil { + t.Fatalf("Failed to load stage file %s : %s", stagecfg, err) + } + + tmpl, err := template.New("test").Parse(string(stagefiles)) + if err != nil { + return fmt.Errorf("failed to parse template %s : %s", stagefiles, err) + } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) + if err != nil { + panic(err) + } + if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { + log.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + } + files := []string{} + for _, x := range stages { + files = append(files, x.Filename) + } + + cscfg := &csconfig.CrowdsecServiceCfg{ + DataDir: "tests", + } + holders, response, err := LoadBuckets(cscfg, files, tomb, buckets) + if err != nil { + t.Fatalf("failed loading bucket : %s", err) + } + tomb.Go(func() error { + watchTomb(tomb) + return nil + }) + if !testFile(t, dir+"/test.json", dir+"/in-buckets_state.json", holders, response, buckets) { + return fmt.Errorf("tests from %s failed", dir) + } + return nil +} + +func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool { + + var results []types.Event + var dump bool + + //should we restore + if _, err := os.Stat(bs); err == nil { + dump = true + if err := LoadBucketsState(bs, buckets, holders); err != nil { + t.Fatalf("Failed to load bucket state : %s", err) + } + } + + /* now we can load the test files */ + //process the yaml + yamlFile, err := os.Open(file) + if err != nil { + t.Errorf("yamlFile.Get err #%v ", err) + } + dec := json.NewDecoder(yamlFile) + dec.DisallowUnknownFields() + //dec.SetStrict(true) + tf := TestFile{} + err = dec.Decode(&tf) + if err != nil { + if err != io.EOF { + t.Errorf("Failed to load testfile '%s' yaml error : %v", file, err) + return false + } + log.Warning("end of test file") + } + var latest_ts time.Time + for _, in := range tf.Lines { + //just to avoid any race during ingestion of funny scenarios + time.Sleep(50 * time.Millisecond) + var ts time.Time + if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { + t.Fatalf("Failed to unmarshal time from input event : %s", err) + } + if latest_ts.IsZero() { + latest_ts = ts + } else if ts.After(latest_ts) { + latest_ts = ts + } + + in.ExpectMode = TIMEMACHINE + log.Infof("Buckets input : %s", spew.Sdump(in)) + ok, err := PourItemToHolders(in, holders, buckets) + if err != nil { + t.Fatalf("Failed to pour : %s", err) + } + if !ok { + log.Warning("Event wasn't poured") + } + } + log.Warning("Done pouring !") + + time.Sleep(1 * time.Second) + + //Read results from chan +POLL_AGAIN: + fails := 0 + for fails < 2 { + select { + case ret := <-response: + log.Warning("got one result") + results = append(results, ret) + if ret.Overflow.Reprocess { + log.Errorf("Overflow being reprocessed.") + ok, err := PourItemToHolders(ret, holders, buckets) + if err != nil { + t.Fatalf("Failed to pour : %s", err) + } + if !ok { + log.Warning("Event wasn't poured") + } + goto POLL_AGAIN + } + fails = 0 + default: + 
log.Warning("no more results") + time.Sleep(1 * time.Second) + fails += 1 + } + } + log.Warningf("Got %d overflows from run", len(results)) + /* + check the results we got against the expected ones + only the keys of the expected part are checked against result + */ + var tmpFile string + + for { + if len(tf.Results) == 0 && len(results) == 0 { + log.Warning("Test is successful") + if dump { + if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { + t.Fatalf("Failed to dump bucket state: %s", err) + } + log.Infof("dumped bucket to %s", tmpFile) + } + return true + } + log.Warningf("%d results to check against %d expected results", len(results), len(tf.Results)) + if len(tf.Results) != len(results) { + if dump { + if tmpFile, err = DumpBucketsStateAt(latest_ts, ".", buckets); err != nil { + t.Fatalf("Failed to dump bucket state: %s", err) + } + log.Infof("dumped bucket to %s", tmpFile) + } + log.Errorf("results / expected count doesn't match results = %d / expected = %d", len(results), len(tf.Results)) + return false + } + checkresultsloop: + for eidx, out := range results { + for ridx, expected := range tf.Results { + + log.Tracef("Checking next expected result.") + + //empty overflow + if out.Overflow.Alert == nil && expected.Overflow.Alert == nil { + //match stuff + } else { + if out.Overflow.Alert == nil || expected.Overflow.Alert == nil { + log.Printf("Here ?") + continue + } + + //Scenario + if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario { + log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) + continue + } + log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) + + //EventsCount + if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount { + log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) + continue + } + log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) + + //Sources + if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) { + log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) + continue + } + log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) + } + //Events + // if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) { + // log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) + // valid = false + // continue + // } else { + // log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) + // } + + //CheckFailed: + + log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) + //don't do this at home : delete current element from list and redo + results[eidx] = results[len(results)-1] + results = results[:len(results)-1] + tf.Results[ridx] = tf.Results[len(tf.Results)-1] + tf.Results = tf.Results[:len(tf.Results)-1] + goto checkresultsloop + } + } + if len(results) != 0 && len(tf.Results) != 0 { + log.Errorf("mismatching entries left") + log.Errorf("we got: %s", spew.Sdump(results)) + log.Errorf("we expected: %s", spew.Sdump(tf.Results)) + return false + } + log.Warning("entry valid at end of loop") + } +} diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go new file mode 100644 index 
0000000..0e4771e --- /dev/null +++ b/pkg/leakybucket/manager_load.go @@ -0,0 +1,412 @@ +package leakybucket + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/davecgh/go-spew/spew" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + "github.com/goombaio/namegenerator" + "gopkg.in/tomb.v2" + yaml "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" +) + +// BucketFactory struct holds all fields for any bucket configuration. This is to have a +// generic struct for buckets. This can be seen as a bucket factory. +type BucketFactory struct { + FormatVersion string `yaml:"format"` + Author string `yaml:"author"` + Description string `yaml:"description"` + References []string `yaml:"references"` + Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics + Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique + Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity + LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket + Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time + Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct + GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip + Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) + Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically + Labels map[string]string `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow + Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration + logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through + ScopeType types.ScopeType `yaml:"scope,omitempty"` //to enforce a different remediation than blocking an IP. 
Will default this to IP + BucketName string `yaml:"-"` + Filename string `yaml:"-"` + RunTimeFilter *vm.Program `json:"-"` + ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` // used to debug expression by printing the content of each variable of the expression + RunTimeGroupBy *vm.Program `json:"-"` + Data []*types.DataSource `yaml:"data,omitempty"` + DataDir string `yaml:"-"` + CancelOnFilter string `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket + leakspeed time.Duration //internal representation of `Leakspeed` + duration time.Duration //internal representation of `Duration` + ret chan types.Event //the bucket-specific output chan for overflows + processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) + output bool //?? + ScenarioVersion string `yaml:"version,omitempty"` + hash string `yaml:"-"` + Simulated bool `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list + tomb *tomb.Tomb `yaml:"-"` + wgPour *sync.WaitGroup `yaml:"-"` + wgDumpState *sync.WaitGroup `yaml:"-"` +} + +//we use one NameGenerator for all the future buckets +var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) + +func ValidateFactory(bucketFactory *BucketFactory) error { + if bucketFactory.Name == "" { + return fmt.Errorf("bucket must have name") + } + if bucketFactory.Description == "" { + return fmt.Errorf("description is mandatory") + } + if bucketFactory.Type == "leaky" { + if bucketFactory.Capacity <= 0 { //capacity must be a positive int + return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) + } + if bucketFactory.LeakSpeed == "" { + return fmt.Errorf("leakspeed can't be empty for leaky") + } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) + } + } else if bucketFactory.Type == "counter" { + if bucketFactory.Duration == "" { + return fmt.Errorf("duration ca't be empty for counter") + } + if bucketFactory.duration == 0 { + return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) + } + if bucketFactory.Capacity != -1 { + return fmt.Errorf("counter bucket must have -1 capacity") + } + } else if bucketFactory.Type == "trigger" { + if bucketFactory.Capacity != 0 { + return fmt.Errorf("trigger bucket must have 0 capacity") + } + } else { + return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) + } + + switch bucketFactory.ScopeType.Scope { + case types.Undefined: + bucketFactory.ScopeType.Scope = types.Ip + case types.Ip: + case types.Range: + var ( + runTimeFilter *vm.Program + err error + ) + if bucketFactory.ScopeType.Filter != "" { + if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))); err != nil { + return fmt.Errorf("Error compiling the scope filter: %s", err) + } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + } + + default: + //Compile the scope filter + var ( + runTimeFilter *vm.Program + err error + ) + if bucketFactory.ScopeType.Filter != "" { + if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))); err != nil { + return fmt.Errorf("Error compiling the scope filter: %s", err) + } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + } + } + return nil +} + +func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, 
files []string, tomb *tomb.Tomb, buckets *Buckets) ([]BucketFactory, chan types.Event, error) { + var ( + ret []BucketFactory = []BucketFactory{} + response chan types.Event + ) + + response = make(chan types.Event, 1) + for _, f := range files { + log.Debugf("Loading '%s'", f) + if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { + log.Debugf("Skipping %s : not a yaml file", f) + continue + } + + //process the yaml + bucketConfigurationFile, err := os.Open(f) + if err != nil { + log.Errorf("Can't access leaky configuration file %s", f) + return nil, nil, err + } + dec := yaml.NewDecoder(bucketConfigurationFile) + dec.SetStrict(true) + for { + bucketFactory := BucketFactory{} + err = dec.Decode(&bucketFactory) + if err != nil { + if err != io.EOF { + log.Errorf("Bad yaml in %s : %v", f, err) + return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) + } + log.Tracef("End of yaml file") + break + } + bucketFactory.DataDir = cscfg.DataDir + //check empty + if bucketFactory.Name == "" { + log.Errorf("Won't load nameless bucket") + return nil, nil, fmt.Errorf("nameless bucket") + } + //check compat + if bucketFactory.FormatVersion == "" { + log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) + bucketFactory.FormatVersion = "1.0" + } + ok, err := cwversion.Statisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) + if err != nil { + log.Fatalf("Failed to check version : %s", err) + } + if !ok { + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) + continue + } + + bucketFactory.Filename = filepath.Clean(f) + bucketFactory.BucketName = seed.Generate() + bucketFactory.ret = response + hubItem, err := cwhub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename) + if err != nil { + log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) + } else { + if cscfg.SimulationConfig != nil { + bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) + } + if hubItem != nil { + bucketFactory.ScenarioVersion = hubItem.LocalVersion + bucketFactory.hash = hubItem.LocalHash + } else { + log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) + } + } + + bucketFactory.wgDumpState = buckets.wgDumpState + bucketFactory.wgPour = buckets.wgPour + err = LoadBucket(&bucketFactory, tomb) + if err != nil { + log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) + return nil, nil, fmt.Errorf("loading of %s failed : %v", bucketFactory.Name, err) + } + ret = append(ret, bucketFactory) + } + } + log.Warningf("Loaded %d scenarios", len(ret)) + return ret, response, nil +} + +/* Init recursively process yaml files from a directory and loads them as BucketFactory */ +func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { + var err error + if bucketFactory.Debug { + var clog = logrus.New() + if err := types.ConfigureLogger(clog); err != nil { + log.Fatalf("While creating bucket-specific logger : %s", err) + } + clog.SetLevel(log.DebugLevel) + bucketFactory.logger = clog.WithFields(log.Fields{ + "cfg": bucketFactory.BucketName, + "name": bucketFactory.Name, + "file": bucketFactory.Filename, + }) + } else { + /* else bind it to the default one (might find something more elegant here)*/ + bucketFactory.logger = log.WithFields(log.Fields{ + "cfg": bucketFactory.BucketName, + "name": 
bucketFactory.Name, + "file": bucketFactory.Filename, + }) + } + + if bucketFactory.LeakSpeed != "" { + if bucketFactory.leakspeed, err = time.ParseDuration(bucketFactory.LeakSpeed); err != nil { + return fmt.Errorf("bad leakspeed '%s' in %s : %v", bucketFactory.LeakSpeed, bucketFactory.Filename, err) + } + } else { + bucketFactory.leakspeed = time.Duration(0) + } + if bucketFactory.Duration != "" { + if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { + return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) + } + } + + if bucketFactory.Filter == "" { + bucketFactory.logger.Warning("Bucket without filter, abort.") + return fmt.Errorf("bucket without filter directive") + } + bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) + } + if bucketFactory.Debug { + bucketFactory.ExprDebugger, err = exprhelpers.NewDebugger(bucketFactory.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + log.Errorf("unable to build debug filter for '%s' : %s", bucketFactory.Filter, err) + } + } + + if bucketFactory.GroupBy != "" { + bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("invalid groupby '%s' in %s : %v", bucketFactory.GroupBy, bucketFactory.Filename, err) + } + } + + bucketFactory.logger.Infof("Adding %s bucket", bucketFactory.Type) + //return the Holder corresponding to the type of bucket + bucketFactory.processors = []Processor{} + switch bucketFactory.Type { + case "leaky": + bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) + case "trigger": + bucketFactory.processors = append(bucketFactory.processors, &Trigger{}) + case "counter": + bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) + default: + return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err) + } + + if bucketFactory.Distinct != "" { + bucketFactory.logger.Tracef("Adding a non duplicate filter on %s.", bucketFactory.Name) + bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) + } + + if bucketFactory.CancelOnFilter != "" { + bucketFactory.logger.Tracef("Adding a cancel_on filter on %s.", bucketFactory.Name) + bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) + } + + if bucketFactory.OverflowFilter != "" { + bucketFactory.logger.Tracef("Adding an overflow filter") + filovflw, err := NewOverflowFilter(bucketFactory) + if err != nil { + bucketFactory.logger.Errorf("Error creating overflow_filter : %s", err) + return fmt.Errorf("error creating overflow_filter : %s", err) + } + bucketFactory.processors = append(bucketFactory.processors, filovflw) + } + + if bucketFactory.Blackhole != "" { + bucketFactory.logger.Tracef("Adding blackhole.") + blackhole, err := NewBlackhole(bucketFactory) + if err != nil { + bucketFactory.logger.Errorf("Error creating blackhole : %s", err) + return fmt.Errorf("error creating blackhole : %s", err) + } + bucketFactory.processors = append(bucketFactory.processors, blackhole) + } + + if len(bucketFactory.Data) > 0 { + for _, data := range bucketFactory.Data { + if data.DestPath 
== "" { + bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) + continue + } + err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) + if err != nil { + bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err) + } + } + } + + bucketFactory.output = false + if err := ValidateFactory(bucketFactory); err != nil { + return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) + } + bucketFactory.tomb = tomb + return nil + +} + +func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFactory) error { + var state map[string]Leaky + body, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("can't state file %s : %s", file, err) + } + if err := json.Unmarshal(body, &state); err != nil { + return fmt.Errorf("can't unmarshal state file %s : %s", file, err) + } + for k, v := range state { + var tbucket *Leaky + log.Debugf("Reloading bucket %s", k) + val, ok := buckets.Bucket_map.Load(k) + if ok { + log.Fatalf("key %s already exists : %+v", k, val) + } + //find back our holder + found := false + for _, h := range bucketFactories { + if h.Name == v.Name { + log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) + //check in which mode the bucket was + if v.Mode == TIMEMACHINE { + tbucket = NewTimeMachine(h) + } else if v.Mode == LIVE { + tbucket = NewLeaky(h) + } else { + log.Errorf("Unknown bucket type : %d", v.Mode) + } + /*Trying to restore queue state*/ + tbucket.Queue = v.Queue + /*Trying to set the limiter to the saved values*/ + tbucket.Limiter.Load(v.SerializedState) + tbucket.In = make(chan *types.Event) + tbucket.Mapkey = k + tbucket.Signal = make(chan bool, 1) + tbucket.First_ts = v.First_ts + tbucket.Last_ts = v.Last_ts + tbucket.Ovflw_ts = v.Ovflw_ts + tbucket.Total_count = v.Total_count + buckets.Bucket_map.Store(k, tbucket) + h.tomb.Go(func() error { + return LeakRoutine(tbucket) + }) + <-tbucket.Signal + found = true + break + } + } + if !found { + log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) + } + } + + log.Infof("Restored %d buckets from dump", len(state)) + return nil + +} diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go new file mode 100644 index 0000000..8ba5697 --- /dev/null +++ b/pkg/leakybucket/manager_load_test.go @@ -0,0 +1,121 @@ +package leakybucket + +import ( + "fmt" + "testing" + + "gopkg.in/tomb.v2" +) + +type cfgTest struct { + cfg BucketFactory + loadable bool + valid bool +} + +func runTest(tests []cfgTest) error { + var tomb *tomb.Tomb = &tomb.Tomb{} + for idx, cfg := range tests { + err := LoadBucket(&cfg.cfg, tomb) + if cfg.loadable && err != nil { + return fmt.Errorf("expected loadable result (%d/%d), got: %s", idx+1, len(tests), err) + } + if !cfg.loadable && err == nil { + return fmt.Errorf("expected unloadable result (%d/%d)", idx+1, len(tests)) + } + err = ValidateFactory(&cfg.cfg) + if cfg.valid && err != nil { + return fmt.Errorf("expected valid result (%d/%d), got: %s", idx+1, len(tests), err) + } + if !cfg.valid && err == nil { + return fmt.Errorf("expected invalid result (%d/%d)", idx+1, len(tests)) + } + } + return nil +} + +func TestBadBucketsConfig(t *testing.T) { + var CfgTests = []cfgTest{ + //empty + {BucketFactory{}, false, false}, + //missing description + {BucketFactory{Name: "test"}, false, false}, + //missing type + {BucketFactory{Name: "test", Description: "test1"}, false, false}, + //bad type + {BucketFactory{Name: "test", 
Description: "test1", Type: "ratata"}, false, false}, + } + if err := runTest(CfgTests); err != nil { + t.Fatalf("%s", err) + } +} + +func TestLeakyBucketsConfig(t *testing.T) { + var CfgTests = []cfgTest{ + //leaky with bad capacity + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 0}, false, false}, + //leaky with empty leakspeed + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1}, false, false}, + //leaky with missing filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s"}, false, true}, + //leaky with invalid leakspeed + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "abs", Filter: "true"}, false, false}, + //leaky with valid filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, + //leaky with invalid filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "xu"}, false, true}, + //leaky with valid filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, + //leaky with bad overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "xu"}, false, true}, + } + + if err := runTest(CfgTests); err != nil { + t.Fatalf("%s", err) + } + +} + +func TestBlackholeConfig(t *testing.T) { + var CfgTests = []cfgTest{ + //basic bh + {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "15s"}, true, true}, + //bad bh + {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "abc"}, false, true}, + } + + if err := runTest(CfgTests); err != nil { + t.Fatalf("%s", err) + } + +} + +func TestTriggerBucketsConfig(t *testing.T) { + var CfgTests = []cfgTest{ + //basic valid counter + {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true"}, true, true}, + } + + if err := runTest(CfgTests); err != nil { + t.Fatalf("%s", err) + } + +} + +func TestCounterBucketsConfig(t *testing.T) { + var CfgTests = []cfgTest{ + + //basic valid counter + {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "5s", Filter: "true"}, true, true}, + //missing duration + {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Filter: "true"}, false, false}, + //bad duration + {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "abc", Filter: "true"}, false, false}, + //capacity must be -1 + {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: 0, Duration: "5s", Filter: "true"}, false, false}, + } + if err := runTest(CfgTests); err != nil { + t.Fatalf("%s", err) + } + +} diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go new file mode 100644 index 0000000..5859967 --- /dev/null +++ b/pkg/leakybucket/manager_run.go @@ -0,0 +1,358 @@ +package leakybucket + +import ( + "encoding/json" + "fmt" + "math" + "os" + "time" + + "github.com/pkg/errors" + + "github.com/mohae/deepcopy" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" +) + +var serialized map[string]Leaky +var 
BucketPourCache map[string][]types.Event +var BucketPourTrack bool + +/*The leaky routines lifecycle are based on "real" time. +But when we are running in time-machine mode, the reference time is in logs and not "real" time. +Thus we need to garbage collect them to avoid a skyrocketing memory usage.*/ +func GarbageCollectBuckets(deadline time.Time, buckets *Buckets) error { + buckets.wgPour.Wait() + buckets.wgDumpState.Add(1) + defer buckets.wgDumpState.Done() + + total := 0 + discard := 0 + toflush := []string{} + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + key := rkey.(string) + val := rvalue.(*Leaky) + total += 1 + //bucket already overflowed, we can kill it + if !val.Ovflw_ts.IsZero() { + discard += 1 + val.logger.Debugf("overflowed at %s.", val.Ovflw_ts) + toflush = append(toflush, key) + val.tomb.Kill(nil) + return true + } + /*FIXME : sometimes the gettokenscountat has some rounding issues when we try to + match it with bucket capacity, even if the bucket has long due underflow. Round to 2 decimals*/ + tokat := val.Limiter.GetTokensCountAt(deadline) + tokcapa := float64(val.Capacity) + tokat = math.Round(tokat*100) / 100 + tokcapa = math.Round(tokcapa*100) / 100 + //bucket actually underflowed based on log time, but no in real time + if tokat >= tokcapa { + BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc() + val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa) + toflush = append(toflush, key) + val.tomb.Kill(nil) + return true + } + + val.logger.Tracef("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) + if _, ok := serialized[key]; ok { + log.Errorf("entry %s already exists", key) + return false + } + log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey) + + return true + }) + log.Infof("Cleaned %d buckets", len(toflush)) + for _, flushkey := range toflush { + buckets.Bucket_map.Delete(flushkey) + } + return nil +} + +func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) (string, error) { + + //synchronize with PourItemtoHolders + buckets.wgPour.Wait() + buckets.wgDumpState.Add(1) + defer buckets.wgDumpState.Done() + + if outputdir == "" { + return "", fmt.Errorf("empty output dir for dump bucket state") + } + tmpFd, err := os.CreateTemp(os.TempDir(), "crowdsec-buckets-dump-") + if err != nil { + return "", fmt.Errorf("failed to create temp file : %s", err) + } + defer tmpFd.Close() + tmpFileName := tmpFd.Name() + serialized = make(map[string]Leaky) + log.Printf("Dumping buckets state at %s", deadline) + total := 0 + discard := 0 + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + key := rkey.(string) + val := rvalue.(*Leaky) + total += 1 + if !val.Ovflw_ts.IsZero() { + discard += 1 + val.logger.Debugf("overflowed at %s.", val.Ovflw_ts) + return true + } + /*FIXME : sometimes the gettokenscountat has some rounding issues when we try to + match it with bucket capacity, even if the bucket has long due underflow. 
Round to 2 decimals*/ + tokat := val.Limiter.GetTokensCountAt(deadline) + tokcapa := float64(val.Capacity) + tokat = math.Round(tokat*100) / 100 + tokcapa = math.Round(tokcapa*100) / 100 + + if tokat >= tokcapa { + BucketsUnderflow.With(prometheus.Labels{"name": val.Name}).Inc() + val.logger.Debugf("UNDERFLOW : first_ts:%s tokens_at:%f capcity:%f", val.First_ts, tokat, tokcapa) + discard += 1 + return true + } + val.logger.Debugf("(%s) not dead, count:%f capacity:%f", val.First_ts, tokat, tokcapa) + + if _, ok := serialized[key]; ok { + log.Errorf("entry %s already exists", key) + return false + } + log.Debugf("serialize %s of %s : %s", val.Name, val.Uuid, val.Mapkey) + val.SerializedState = val.Limiter.Dump() + serialized[key] = *val + return true + }) + bbuckets, err := json.MarshalIndent(serialized, "", " ") + if err != nil { + return "", fmt.Errorf("Failed to unmarshal buckets : %s", err) + } + size, err := tmpFd.Write(bbuckets) + if err != nil { + return "", fmt.Errorf("failed to write temp file : %s", err) + } + log.Infof("Serialized %d live buckets (+%d expired) in %d bytes to %s", len(serialized), discard, size, tmpFd.Name()) + serialized = nil + return tmpFileName, nil +} + +func ShutdownAllBuckets(buckets *Buckets) error { + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + key := rkey.(string) + val := rvalue.(*Leaky) + val.tomb.Kill(nil) + log.Infof("killed %s", key) + return true + }) + return nil +} + +func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, parsed *types.Event) (bool, error) { + var sent bool + var buckey = bucket.Mapkey + var err error + + sigclosed := 0 + failed_sent := 0 + attempts := 0 + start := time.Now().UTC() + + for !sent { + attempts += 1 + /* Warn the user if we used more than a 100 ms to pour an event, it's at least an half lock*/ + if attempts%100000 == 0 && start.Add(100*time.Millisecond).Before(time.Now().UTC()) { + holder.logger.Warningf("stuck for %s sending event to %s (sigclosed:%d failed_sent:%d attempts:%d)", time.Since(start), + buckey, sigclosed, failed_sent, attempts) + } + + /* check if leak routine is up */ + select { + case _, ok := <-bucket.Signal: + if !ok { + //the bucket was found and dead, get a new one and continue + bucket.logger.Tracef("Bucket %s found dead, cleanup the body", buckey) + buckets.Bucket_map.Delete(buckey) + sigclosed += 1 + bucket, err = LoadOrStoreBucketFromHolder(buckey, buckets, holder, parsed.ExpectMode) + if err != nil { + return false, err + } + continue + } + //holder.logger.Tracef("Signal exists, try to pour :)") + default: + /*nothing to read, but not closed, try to pour */ + //holder.logger.Tracef("Signal exists but empty, try to pour :)") + } + + /*let's see if this time-bucket should have expired */ + if bucket.Mode == TIMEMACHINE { + bucket.mutex.Lock() + firstTs := bucket.First_ts + lastTs := bucket.Last_ts + bucket.mutex.Unlock() + + if !firstTs.IsZero() { + var d time.Time + err = d.UnmarshalText([]byte(parsed.MarshaledTime)) + if err != nil { + holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + } + if d.After(lastTs.Add(bucket.Duration)) { + bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, lastTs.Add(bucket.Duration)) + buckets.Bucket_map.Delete(buckey) + //not sure about this, should we create a new one ? 
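+				//A worked example of the check above (numbers are
+				//illustrative): a leaky bucket with capacity 5 and
+				//leakspeed 10s gets Duration = (5+1) * 10s = 60s in
+				//FromFactory. In time-machine mode, an event stamped
+				//more than 60s after the bucket's Last_ts means the
+				//bucket would have fully leaked in the meantime, so
+				//the stale instance is deleted and a fresh one is
+				//created below before pouring.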
+ sigclosed += 1 + bucket, err = LoadOrStoreBucketFromHolder(buckey, buckets, holder, parsed.ExpectMode) + if err != nil { + return false, err + } + continue + } + } + } + /*the bucket seems to be up & running*/ + select { + case bucket.In <- parsed: + //holder.logger.Tracef("Successfully sent !") + if BucketPourTrack { + if _, ok := BucketPourCache[bucket.Name]; !ok { + BucketPourCache[bucket.Name] = make([]types.Event, 0) + } + evt := deepcopy.Copy(*parsed) + BucketPourCache[bucket.Name] = append(BucketPourCache[bucket.Name], evt.(types.Event)) + } + sent = true + continue + default: + failed_sent += 1 + //holder.logger.Tracef("Failed to send, try again") + continue + + } + } + holder.logger.Debugf("bucket '%s' is poured", holder.Name) + return sent, nil +} + +func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder BucketFactory, expectMode int) (*Leaky, error) { + + biface, ok := buckets.Bucket_map.Load(partitionKey) + + /* the bucket doesn't exist, create it !*/ + if !ok { + var fresh_bucket *Leaky + + switch expectMode { + case TIMEMACHINE: + fresh_bucket = NewTimeMachine(holder) + holder.logger.Debugf("Creating TimeMachine bucket") + case LIVE: + fresh_bucket = NewLeaky(holder) + holder.logger.Debugf("Creating Live bucket") + default: + return nil, fmt.Errorf("input event has no expected mode : %+v", expectMode) + } + fresh_bucket.In = make(chan *types.Event) + fresh_bucket.Mapkey = partitionKey + fresh_bucket.Signal = make(chan bool, 1) + actual, stored := buckets.Bucket_map.LoadOrStore(partitionKey, fresh_bucket) + if !stored { + holder.tomb.Go(func() error { + return LeakRoutine(fresh_bucket) + }) + biface = fresh_bucket + //once the created goroutine is ready to process event, we can return it + <-fresh_bucket.Signal + } else { + holder.logger.Debugf("Unexpectedly found exisint bucket for %s", partitionKey) + biface = actual + } + holder.logger.Debugf("Created new bucket %s", partitionKey) + } + return biface.(*Leaky), nil +} + +func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) { + var ( + ok, condition, poured bool + ) + + if BucketPourTrack { + if BucketPourCache == nil { + BucketPourCache = make(map[string][]types.Event) + } + if _, ok = BucketPourCache["OK"]; !ok { + BucketPourCache["OK"] = make([]types.Event, 0) + } + evt := deepcopy.Copy(parsed) + BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) + } + + cachedExprEnv := exprhelpers.GetExprEnv(map[string]interface{}{"evt": &parsed}) + + //find the relevant holders (scenarios) + for idx := 0; idx < len(holders); idx++ { + //for idx, holder := range holders { + + //evaluate bucket's condition + if holders[idx].RunTimeFilter != nil { + holders[idx].logger.Tracef("event against holder %d/%d", idx, len(holders)) + output, err := expr.Run(holders[idx].RunTimeFilter, cachedExprEnv) + if err != nil { + holders[idx].logger.Errorf("failed parsing : %v", err) + return false, fmt.Errorf("leaky failed : %s", err) + } + // we assume we a bool should add type check here + if condition, ok = output.(bool); !ok { + holders[idx].logger.Errorf("unexpected non-bool return : %T", output) + holders[idx].logger.Fatalf("Filter issue") + } + + if holders[idx].Debug { + holders[idx].ExprDebugger.Run(holders[idx].logger, condition, cachedExprEnv) + } + if !condition { + holders[idx].logger.Debugf("Event leaving node : ko (filter mismatch)") + continue + } + } + + //groupby determines the partition key for the specific bucket + var groupby string + if 
holders[idx].RunTimeGroupBy != nil { + tmpGroupBy, err := expr.Run(holders[idx].RunTimeGroupBy, cachedExprEnv) + if err != nil { + holders[idx].logger.Errorf("failed groupby : %v", err) + return false, errors.New("leaky failed :/") + } + + if groupby, ok = tmpGroupBy.(string); !ok { + holders[idx].logger.Fatalf("failed groupby type : %v", err) + return false, errors.New("groupby wrong type") + } + } + buckey := GetKey(holders[idx], groupby) + + //we need to either find the existing bucket, or create a new one (if it's the first event to hit it for this partition key) + bucket, err := LoadOrStoreBucketFromHolder(buckey, buckets, holders[idx], parsed.ExpectMode) + if err != nil { + return false, errors.Wrap(err, "failed to load or store bucket") + } + //finally, pour the even into the bucket + ok, err := PourItemToBucket(bucket, holders[idx], buckets, &parsed) + if err != nil { + return false, errors.Wrap(err, "failed to pour bucket") + } + if ok { + poured = true + } + } + return poured, nil +} diff --git a/pkg/leakybucket/manager_run_test.go b/pkg/leakybucket/manager_run_test.go new file mode 100644 index 0000000..f654a27 --- /dev/null +++ b/pkg/leakybucket/manager_run_test.go @@ -0,0 +1,182 @@ +package leakybucket + +import ( + "fmt" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" +) + +func expectBucketCount(buckets *Buckets, expected int) error { + count := 0 + buckets.Bucket_map.Range(func(rkey, rvalue interface{}) bool { + count++ + return true + }) + if count != expected { + return fmt.Errorf("expected %d live buckets, got %d", expected, count) + } + return nil + +} + +func TestGCandDump(t *testing.T) { + var ( + buckets *Buckets = NewBuckets() + tomb *tomb.Tomb = &tomb.Tomb{} + ) + + var Holders = []BucketFactory{ + //one overflowing soon + bh + BucketFactory{ + Name: "test_counter_fast", + Description: "test_counter_fast", + Debug: true, + Type: "counter", + Capacity: -1, + Duration: "0.5s", + Blackhole: "1m", + Filter: "true", + wgDumpState: buckets.wgDumpState, + wgPour: buckets.wgPour, + }, + //one long counter + BucketFactory{ + Name: "test_counter_slow", + Description: "test_counter_slow", + Debug: true, + Type: "counter", + Capacity: -1, + Duration: "10m", + Filter: "true", + wgDumpState: buckets.wgDumpState, + wgPour: buckets.wgPour, + }, + //slow leaky + BucketFactory{ + Name: "test_leaky_slow", + Description: "test_leaky_slow", + Debug: true, + Type: "leaky", + Capacity: 5, + LeakSpeed: "10m", + Filter: "true", + wgDumpState: buckets.wgDumpState, + wgPour: buckets.wgPour, + }, + } + + for idx := range Holders { + if err := LoadBucket(&Holders[idx], tomb); err != nil { + t.Fatalf("while loading (%d/%d): %s", idx, len(Holders), err) + } + if err := ValidateFactory(&Holders[idx]); err != nil { + t.Fatalf("while validating (%d/%d): %s", idx, len(Holders), err) + } + } + + log.Printf("Pouring to bucket") + + var in = types.Event{Parsed: map[string]string{"something": "something"}} + //pour an item that will go to leaky + counter + ok, err := PourItemToHolders(in, Holders, buckets) + if err != nil { + t.Fatalf("while pouring item : %s", err) + } + if !ok { + t.Fatalf("didn't pour item") + } + + time.Sleep(2 * time.Second) + + if err := expectBucketCount(buckets, 3); err != nil { + t.Fatal(err) + } + log.Printf("Bucket GC") + + //call garbage collector + if err := GarbageCollectBuckets(time.Now().UTC(), buckets); err != nil { + t.Fatalf("failed to garbage collect buckets : %s", err) + } + + if err := 
expectBucketCount(buckets, 1); err != nil { + t.Fatal(err) + } + + log.Printf("Dumping buckets state") + //dump remaining buckets + if _, err := DumpBucketsStateAt(time.Now().UTC(), ".", buckets); err != nil { + t.Fatalf("failed to dump buckets : %s", err) + } +} + +func TestShutdownBuckets(t *testing.T) { + var ( + buckets *Buckets = NewBuckets() + Holders = []BucketFactory{ + //one long counter + BucketFactory{ + Name: "test_counter_slow", + Description: "test_counter_slow", + Debug: true, + Type: "counter", + Capacity: -1, + Duration: "10m", + Filter: "true", + wgDumpState: buckets.wgDumpState, + wgPour: buckets.wgPour, + }, + //slow leaky + BucketFactory{ + Name: "test_leaky_slow", + Description: "test_leaky_slow", + Debug: true, + Type: "leaky", + Capacity: 5, + LeakSpeed: "10m", + Filter: "true", + wgDumpState: buckets.wgDumpState, + wgPour: buckets.wgPour, + }, + } + tomb *tomb.Tomb = &tomb.Tomb{} + ) + + for idx := range Holders { + if err := LoadBucket(&Holders[idx], tomb); err != nil { + t.Fatalf("while loading (%d/%d): %s", idx, len(Holders), err) + } + if err := ValidateFactory(&Holders[idx]); err != nil { + t.Fatalf("while validating (%d/%d): %s", idx, len(Holders), err) + } + } + + log.Printf("Pouring to bucket") + + var in = types.Event{Parsed: map[string]string{"something": "something"}} + //pour an item that will go to leaky + counter + ok, err := PourItemToHolders(in, Holders, buckets) + if err != nil { + t.Fatalf("while pouring item : %s", err) + } + if !ok { + t.Fatalf("didn't pour item") + } + + time.Sleep(1 * time.Second) + + if err := expectBucketCount(buckets, 2); err != nil { + t.Fatal(err) + } + if err := ShutdownAllBuckets(buckets); err != nil { + t.Fatalf("while shutting down buckets : %s", err) + } + time.Sleep(2 * time.Second) + if err := expectBucketCount(buckets, 2); err != nil { + t.Fatal(err) + } + +} diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go new file mode 100644 index 0000000..7be6720 --- /dev/null +++ b/pkg/leakybucket/overflow_filter.go @@ -0,0 +1,62 @@ +package leakybucket + +import ( + "fmt" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// Uniq creates three new functions that share the same initialisation and the same scope. 
+// They are triggered respectively: +// on pour +// on overflow +// on leak + +type OverflowFilter struct { + Filter string + FilterRuntime *vm.Program + DumbProcessor +} + +func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { + var err error + + u := OverflowFilter{} + u.Filter = g.OverflowFilter + u.FilterRuntime, err = expr.Compile(u.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{ + "queue": &Queue{}, "signal": &types.RuntimeAlert{}, "leaky": &Leaky{}}))) + if err != nil { + g.logger.Errorf("Unable to compile filter : %v", err) + return nil, fmt.Errorf("unable to compile filter : %v", err) + } + return &u, nil +} + +func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(l *Leaky, s types.RuntimeAlert, q *Queue) (types.RuntimeAlert, *Queue) { + el, err := expr.Run(u.FilterRuntime, exprhelpers.GetExprEnv(map[string]interface{}{ + "queue": q, "signal": s, "leaky": l})) + if err != nil { + l.logger.Errorf("Failed running overflow filter: %s", err) + return s, q + } + element, ok := el.(bool) + if !ok { + l.logger.Errorf("Overflow filter didn't return bool: %s", err) + return s, q + } + /*filter returned false, event is blackholded*/ + if !element { + l.logger.Infof("Event is discarded by overflow filter (%s)", u.Filter) + return types.RuntimeAlert{ + Mapkey: l.Mapkey, + }, nil + } + l.logger.Tracef("Event is not discarded by overflow filter (%s)", u.Filter) + return s, q + } +} diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go new file mode 100644 index 0000000..3a7732a --- /dev/null +++ b/pkg/leakybucket/overflows.go @@ -0,0 +1,322 @@ +package leakybucket + +import ( + "fmt" + "net" + "sort" + "strconv" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" +) + +//SourceFromEvent extracts and formats a valid models.Source object from an Event +func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) + /*if it's already an overflow, we have properly formatted sources. 
+ we can just twitch them to reflect the requested scope*/ + if evt.Type == types.OVFLW { + + for k, v := range evt.Overflow.Sources { + + /*the scopes are already similar, nothing to do*/ + if leaky.scopeType.Scope == *v.Scope { + srcs[k] = v + continue + } + + /*The bucket requires a decision on scope Range */ + if leaky.scopeType.Scope == types.Range { + /*the original bucket was target IPs, check that we do have range*/ + if *v.Scope == types.Ip { + src := models.Source{} + src.AsName = v.AsName + src.AsNumber = v.AsNumber + src.Cn = v.Cn + src.Latitude = v.Latitude + src.Longitude = v.Longitude + src.Range = v.Range + src.Value = new(string) + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + *src.Value = "" + if v.Range != "" { + *src.Value = v.Range + } + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt})) + if err != nil { + return srcs, errors.Wrapf(err, "while running scope filter") + } + value, ok := retValue.(string) + if !ok { + value = "" + } + src.Value = &value + } + if *src.Value != "" { + srcs[*src.Value] = src + } else { + log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) + } + } else { + log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", + leaky.Name, *v.Scope, *v.Value) + } + } + } + return srcs, nil + } + src := models.Source{} + switch leaky.scopeType.Scope { + case types.Range, types.Ip: + v, ok := evt.Meta["source_ip"] + if !ok { + return srcs, fmt.Errorf("scope is %s but Meta[source_ip] doesn't exist", leaky.scopeType.Scope) + } + if net.ParseIP(v) == nil { + return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v) + } + src.IP = v + src.Scope = &leaky.scopeType.Scope + if v, ok := evt.Enriched["ASNumber"]; ok { + src.AsNumber = v + } else if v, ok := evt.Enriched["ASNNumber"]; ok { + src.AsNumber = v + } + if v, ok := evt.Enriched["IsoCode"]; ok { + src.Cn = v + } + if v, ok := evt.Enriched["ASNOrg"]; ok { + src.AsName = v + } + if v, ok := evt.Enriched["Latitude"]; ok { + l, err := strconv.ParseFloat(v, 32) + if err != nil { + log.Warningf("bad latitude %s : %s", v, err) + } + src.Latitude = float32(l) + } + if v, ok := evt.Enriched["Longitude"]; ok { + l, err := strconv.ParseFloat(v, 32) + if err != nil { + log.Warningf("bad longitude %s : %s", v, err) + } + src.Longitude = float32(l) + } + if v, ok := evt.Meta["SourceRange"]; ok && v != "" { + _, ipNet, err := net.ParseCIDR(v) + if err != nil { + return srcs, fmt.Errorf("Declared range %s of %s can't be parsed", v, src.IP) + } + if ipNet != nil { + src.Range = ipNet.String() + leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range) + } + } + if leaky.scopeType.Scope == types.Ip { + src.Value = &src.IP + } else if leaky.scopeType.Scope == types.Range { + src.Value = &src.Range + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt})) + if err != nil { + return srcs, errors.Wrapf(err, "while running scope filter") + } + + value, ok := retValue.(string) + if !ok { + value = "" + } + src.Value = &value + } + } + srcs[*src.Value] = src + default: + if leaky.scopeType.RunTimeFilter == nil { + return srcs, fmt.Errorf("empty scope information") + } + retValue, err := expr.Run(leaky.scopeType.RunTimeFilter, 
exprhelpers.GetExprEnv(map[string]interface{}{"evt": &evt})) + if err != nil { + return srcs, errors.Wrapf(err, "while running scope filter") + } + + value, ok := retValue.(string) + if !ok { + value = "" + } + src.Value = &value + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + srcs[*src.Value] = src + } + return srcs, nil +} + +//EventsFromQueue iterates the queue to collect & prepare meta-datas from alert +func EventsFromQueue(queue *Queue) []*models.Event { + + events := []*models.Event{} + + for _, evt := range queue.Queue { + if evt.Meta == nil { + continue + } + meta := models.Meta{} + //we want consistence + skeys := make([]string, 0, len(evt.Meta)) + for k := range evt.Meta { + skeys = append(skeys, k) + } + sort.Strings(skeys) + for _, k := range skeys { + v := evt.Meta[k] + subMeta := models.MetaItems0{Key: k, Value: v} + meta = append(meta, &subMeta) + } + + /*check which date to use*/ + ovflwEvent := models.Event{ + Meta: meta, + } + //either MarshaledTime is present and is extracted from log + if evt.MarshaledTime != "" { + tmpTimeStamp := evt.MarshaledTime + ovflwEvent.Timestamp = &tmpTimeStamp + } else if !evt.Time.IsZero() { //or .Time has been set during parse as time.Now().UTC() + ovflwEvent.Timestamp = new(string) + raw, err := evt.Time.MarshalText() + if err != nil { + log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) + } else { + *ovflwEvent.Timestamp = string(raw) + } + } else { + log.Warning("Event has no parsed time, no runtime timestamp") + } + + events = append(events, &ovflwEvent) + } + return events +} + +//alertFormatSource iterates over the queue to collect sources +func alertFormatSource(leaky *Leaky, queue *Queue) (map[string]models.Source, string, error) { + var sources map[string]models.Source = make(map[string]models.Source) + var source_type string + + log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) + + for _, evt := range queue.Queue { + srcs, err := SourceFromEvent(evt, leaky) + if err != nil { + return nil, "", errors.Wrapf(err, "while extracting scope from bucket %s", leaky.Name) + } + for key, src := range srcs { + if source_type == types.Undefined { + source_type = *src.Scope + } + if *src.Scope != source_type { + return nil, "", + fmt.Errorf("event has multiple source types : %s != %s", *src.Scope, source_type) + } + sources[key] = src + } + } + return sources, source_type, nil +} + +//NewAlert will generate a RuntimeAlert and its APIAlert(s) from a bucket that overflowed +func NewAlert(leaky *Leaky, queue *Queue) (types.RuntimeAlert, error) { + var runtimeAlert types.RuntimeAlert + + leaky.logger.Tracef("Overflow (start: %s, end: %s)", leaky.First_ts, leaky.Ovflw_ts) + /* + Craft the models.Alert that is going to be duplicated for each source + */ + start_at, err := leaky.First_ts.MarshalText() + if err != nil { + log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) + } + stop_at, err := leaky.Ovflw_ts.MarshalText() + if err != nil { + log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) + } + capacity := int32(leaky.Capacity) + EventsCount := int32(leaky.Total_count) + leakSpeed := leaky.Leakspeed.String() + startAt := string(start_at) + stopAt := string(stop_at) + apiAlert := models.Alert{ + Scenario: &leaky.Name, + ScenarioHash: &leaky.hash, + ScenarioVersion: &leaky.scenarioVersion, + Capacity: &capacity, + EventsCount: &EventsCount, + Leakspeed: &leakSpeed, + 
Message: new(string),
+		StartAt: &startAt,
+		StopAt: &stopAt,
+		Simulated: &leaky.Simulated,
+	}
+	if leaky.BucketConfig == nil {
+		return runtimeAlert, fmt.Errorf("leaky.BucketConfig is nil")
+	}
+
+	//give information about the bucket
+	runtimeAlert.Mapkey = leaky.Mapkey
+
+	//Get the sources from Leaky/Queue
+	sources, source_scope, err := alertFormatSource(leaky, queue)
+	if err != nil {
+		return runtimeAlert, errors.Wrap(err, "unable to collect sources from bucket")
+	}
+	runtimeAlert.Sources = sources
+	//Include source info in format string
+	sourceStr := "UNKNOWN"
+	if len(sources) > 1 {
+		sourceStr = fmt.Sprintf("%d sources", len(sources))
+	} else if len(sources) == 1 {
+		for k := range sources {
+			sourceStr = k
+			break
+		}
+	}
+
+	*apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts)
+	//Get the events from Leaky/Queue
+	apiAlert.Events = EventsFromQueue(queue)
+
+	//Loop over the Sources and generate appropriate number of ApiAlerts
+	for _, srcValue := range sources {
+		newApiAlert := apiAlert
+		srcCopy := srcValue
+		newApiAlert.Source = &srcCopy
+		if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == "true" {
+			newApiAlert.Remediation = true
+		}
+
+		if err := newApiAlert.Validate(strfmt.Default); err != nil {
+			log.Errorf("Generated alert isn't valid")
+			log.Errorf("->%s", spew.Sdump(newApiAlert))
+			log.Fatalf("error : %s", err)
+		}
+		runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert)
+	}
+
+	if len(runtimeAlert.APIAlerts) > 0 {
+		runtimeAlert.Alert = &runtimeAlert.APIAlerts[0]
+	}
+
+	if leaky.Reprocess {
+		runtimeAlert.Reprocess = true
+	}
+	return runtimeAlert, nil
+}
diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go
new file mode 100644
index 0000000..50693e7
--- /dev/null
+++ b/pkg/leakybucket/processor.go
@@ -0,0 +1,29 @@
+package leakybucket
+
+import "github.com/crowdsecurity/crowdsec/pkg/types"
+
+type Processor interface {
+	OnBucketInit(Bucket *BucketFactory) error
+	OnBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event
+	OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue)
+}
+
+type DumbProcessor struct {
+}
+
+func (d *DumbProcessor) OnBucketInit(bucketFactory *BucketFactory) error {
+	return nil
+}
+
+func (d *DumbProcessor) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event {
+	return func(msg types.Event, leaky *Leaky) *types.Event {
+		return &msg
+	}
+}
+
+func (d *DumbProcessor) OnBucketOverflow(b *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) {
+	return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) {
+		return alert, queue
+	}
+}
diff --git a/pkg/leakybucket/queue.go b/pkg/leakybucket/queue.go
new file mode 100644
index 0000000..03130b7
--- /dev/null
+++ b/pkg/leakybucket/queue.go
@@ -0,0 +1,42 @@
+package leakybucket
+
+import (
+	"github.com/crowdsecurity/crowdsec/pkg/types"
+	log "github.com/sirupsen/logrus"
+)
+
+// Queue holds a limited size queue
+type Queue struct {
+	Queue []types.Event
+	L     int //capacity
+}
+
+// NewQueue creates a new queue with a size of l
+func NewQueue(l int) *Queue {
+	if l == -1 {
+		return &Queue{
+			Queue: make([]types.Event, 0),
+			L:     int(^uint(0) >> 1), // max integer value, architecture independent
+		}
+	}
+	q := &Queue{
+		Queue: make([]types.Event, 0,
l), + L: l, + } + log.WithFields(log.Fields{"Capacity": q.L}).Debugf("Creating queue") + return q +} + +// Add an event in the queue. If it has already l elements, the first +// element is dropped before adding the new m element +func (q *Queue) Add(m types.Event) { + for len(q.Queue) > q.L { //we allow to add one element more than the true capacity + q.Queue = q.Queue[1:] + } + q.Queue = append(q.Queue, m) +} + +// GetQueue returns the entire queue +func (q *Queue) GetQueue() []types.Event { + return q.Queue +} diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go new file mode 100644 index 0000000..0d50294 --- /dev/null +++ b/pkg/leakybucket/reset_filter.go @@ -0,0 +1,109 @@ +package leakybucket + +import ( + "sync" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// ResetFilter allows to kill the bucket (without overflowing), if a particular condition is met. +// An example would be a scenario to detect aggressive crawlers that *do not* fetch any static resources : +// type : leaky +// filter: "evt.Meta.log_type == 'http_access-log' +// reset_filter: evt.Parsed.request endswith '.css' +// .... +// Thus, if the bucket receives a request that matches fetching a static resource (here css), it cancels itself + +type CancelOnFilter struct { + CancelOnFilter *vm.Program + CancelOnFilterDebug *exprhelpers.ExprDebugger +} + +var cancelExprCacheLock sync.Mutex +var cancelExprCache map[string]struct { + CancelOnFilter *vm.Program + CancelOnFilterDebug *exprhelpers.ExprDebugger +} + +func (u *CancelOnFilter) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, leaky *Leaky) *types.Event { + var condition, ok bool + if u.CancelOnFilter != nil { + leaky.logger.Tracef("running cancel_on filter") + output, err := expr.Run(u.CancelOnFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg})) + if err != nil { + leaky.logger.Warningf("cancel_on error : %s", err) + return &msg + } + //only run debugger expression if condition is false + if u.CancelOnFilterDebug != nil { + u.CancelOnFilterDebug.Run(leaky.logger, condition, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg})) + } + if condition, ok = output.(bool); !ok { + leaky.logger.Warningf("cancel_on, unexpected non-bool return : %T", output) + return &msg + } + if condition { + leaky.logger.Debugf("reset_filter matched, kill bucket") + leaky.Suicide <- true + return nil //counter intuitively, we need to keep the message so that it doesn't trigger an endless loop + } + leaky.logger.Debugf("reset_filter didn't match") + } + return &msg + } +} + +func (u *CancelOnFilter) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) { + return alert, queue + } +} + +func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error { + var err error + var compiledExpr struct { + CancelOnFilter *vm.Program + CancelOnFilterDebug *exprhelpers.ExprDebugger + } + + if cancelExprCache == nil { + cancelExprCache = make(map[string]struct { + CancelOnFilter *vm.Program + CancelOnFilterDebug *exprhelpers.ExprDebugger + }) + } + + cancelExprCacheLock.Lock() + if compiled, ok := cancelExprCache[bucketFactory.CancelOnFilter]; ok { + cancelExprCacheLock.Unlock() + u.CancelOnFilter = 
compiled.CancelOnFilter + u.CancelOnFilterDebug = compiled.CancelOnFilterDebug + return nil + } else { + cancelExprCacheLock.Unlock() + //release the lock during compile + compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + bucketFactory.logger.Errorf("reset_filter compile error : %s", err) + return err + } + u.CancelOnFilter = compiledExpr.CancelOnFilter + if bucketFactory.Debug { + compiledExpr.CancelOnFilterDebug, err = exprhelpers.NewDebugger(bucketFactory.CancelOnFilter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + bucketFactory.logger.Errorf("reset_filter debug error : %s", err) + return err + } + u.CancelOnFilterDebug = compiledExpr.CancelOnFilterDebug + } + cancelExprCacheLock.Lock() + cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr + cancelExprCacheLock.Unlock() + } + return err +} diff --git a/pkg/leakybucket/tests/leaky-fixedqueue/bucket.yaml b/pkg/leakybucket/tests/leaky-fixedqueue/bucket.yaml new file mode 100644 index 0000000..fae9e5f --- /dev/null +++ b/pkg/leakybucket/tests/leaky-fixedqueue/bucket.yaml @@ -0,0 +1,12 @@ +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 5 +cache_size: 3 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/leaky-fixedqueue/scenarios.yaml b/pkg/leakybucket/tests/leaky-fixedqueue/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/leaky-fixedqueue/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/leaky-fixedqueue/test.json b/pkg/leakybucket/tests/leaky-fixedqueue/test.json new file mode 100644 index 0000000..55f76a9 --- /dev/null +++ b/pkg/leakybucket/tests/leaky-fixedqueue/test.json @@ -0,0 +1,98 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar2" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar0" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE5 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE6 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05.000Z", + "Meta": { + "source_ip": "2a00:1450:4007:816::200e", + "ratata" : "foobar" + } + } + ], + "results": [ + { + "Type" : 1, + "Alert": { + "sources" : { + "2a00:1450:4007:816::200e": { + "ip": "2a00:1450:4007:816::200e", + "scope": 
"Ip", + "value": "2a00:1450:4007:816::200e" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 6 + } + } + } + ] +} \ No newline at end of file diff --git a/pkg/leakybucket/tests/leaky-scope-range-expression/bucket.yaml b/pkg/leakybucket/tests/leaky-scope-range-expression/bucket.yaml new file mode 100644 index 0000000..1c0c4a1 --- /dev/null +++ b/pkg/leakybucket/tests/leaky-scope-range-expression/bucket.yaml @@ -0,0 +1,14 @@ +type: leaky +debug: true +name: test/leaky-scope-range-expression +description: "Leaky with scope range-expression" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 +scope: + type: Range + expression: IpToRange(evt.Meta.source_ip, "/16") + diff --git a/pkg/leakybucket/tests/leaky-scope-range-expression/scenarios.yaml b/pkg/leakybucket/tests/leaky-scope-range-expression/scenarios.yaml new file mode 100644 index 0000000..05e1557 --- /dev/null +++ b/pkg/leakybucket/tests/leaky-scope-range-expression/scenarios.yaml @@ -0,0 +1 @@ + - filename: {{.TestDirectory}}/bucket.yaml \ No newline at end of file diff --git a/pkg/leakybucket/tests/leaky-scope-range-expression/test.json b/pkg/leakybucket/tests/leaky-scope-range-expression/test.json new file mode 100644 index 0000000..38bc7ff --- /dev/null +++ b/pkg/leakybucket/tests/leaky-scope-range-expression/test.json @@ -0,0 +1,47 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "192.168.1.1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "192.168.1.1" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "192.168.0.0/16": { + "scope": "Range", + "value": "192.168.0.0/16", + "ip": "192.168.1.1" + } + }, + "Alert" : { + "scenario": "test/leaky-scope-range-expression", + "events_count": 2 + } + } + } + ] + } + + \ No newline at end of file diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml b/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml new file mode 100644 index 0000000..566a47d --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/bucket.yaml @@ -0,0 +1,9 @@ +# ssh bruteforce +version: 1.0 +type: trigger +debug: true +name: test/simple-trigger +description: "Simple trigger" +filter: "evt.Line.Labels.type =='testlog'" +labels: + type: overflow_1 diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml b/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json b/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json new file mode 100644 index 0000000..3f8c8a6 --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta-and-information/test.json @@ -0,0 +1,81 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": 
"FR", + "ASNOrg": "random AS" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + } + ], + "results" : [ + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger", + "scenario_version": "1.0" + } + } + }, + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger", + "scenario_version": "1.0" + } + } + + } + ] + } diff --git a/pkg/leakybucket/tests/overflow-with-meta/bucket.yaml b/pkg/leakybucket/tests/overflow-with-meta/bucket.yaml new file mode 100644 index 0000000..378aff8 --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta/bucket.yaml @@ -0,0 +1,9 @@ +# ssh bruteforce +type: trigger +debug: true +name: test/simple-trigger +description: "Simple trigger" +filter: "evt.Line.Labels.type =='testlog'" +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/overflow-with-meta/scenarios.yaml b/pkg/leakybucket/tests/overflow-with-meta/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/overflow-with-meta/test.json b/pkg/leakybucket/tests/overflow-with-meta/test.json new file mode 100644 index 0000000..e6a24ba --- /dev/null +++ b/pkg/leakybucket/tests/overflow-with-meta/test.json @@ -0,0 +1,79 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00.000Z", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + }, + "Enriched": { + "ASNumber": "1234", + "IsoCode": "FR", + "ASNOrg": "random AS" + } + } + ], + "results" : [ + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger" + } + } + }, + { + "Alert": { + "Sources": { + "1.2.3.4": { + "as_name": "random AS", + "as_number": "1234", + "cn": "FR", + "ip": "1.2.3.4", + "scope": "Ip", + "value": "1.2.3.4" + } + }, + "Alert" : { + "events_count": 1, + "scenario": "test/simple-trigger" + } + } + + } + ] + } \ No newline at end of file diff --git a/pkg/leakybucket/tests/simple-counter-bh/bucket.yaml b/pkg/leakybucket/tests/simple-counter-bh/bucket.yaml new file mode 100644 index 0000000..c53d31d --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-bh/bucket.yaml @@ -0,0 +1,11 @@ +type: counter +name: test/simple-trigger +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +duration: 1s +overflow_filter: any(queue.Queue, 
{.Meta.source_ip != '1.2.3.4'} ) +capacity: -1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-counter-bh/scenarios.yaml b/pkg/leakybucket/tests/simple-counter-bh/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-bh/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-counter-bh/test.json b/pkg/leakybucket/tests/simple-counter-bh/test.json new file mode 100644 index 0000000..3529de9 --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-bh/test.json @@ -0,0 +1,35 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-counter-timeout/bucket.yaml b/pkg/leakybucket/tests/simple-counter-timeout/bucket.yaml new file mode 100644 index 0000000..1b8078a --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-timeout/bucket.yaml @@ -0,0 +1,10 @@ +type: counter +name: test/simple-trigger +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +duration: 10s +capacity: -1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-counter-timeout/scenarios.yaml b/pkg/leakybucket/tests/simple-counter-timeout/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-timeout/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-counter-timeout/test.json b/pkg/leakybucket/tests/simple-counter-timeout/test.json new file mode 100644 index 0000000..b348ee7 --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter-timeout/test.json @@ -0,0 +1,30 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": null +} + diff --git a/pkg/leakybucket/tests/simple-counter/bucket.yaml b/pkg/leakybucket/tests/simple-counter/bucket.yaml new file mode 100644 index 0000000..3a04dda --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter/bucket.yaml @@ -0,0 +1,10 @@ +type: counter +name: test/simple-counter +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +duration: 1s +capacity: -1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-counter/scenarios.yaml b/pkg/leakybucket/tests/simple-counter/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-counter/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-counter/test.json b/pkg/leakybucket/tests/simple-counter/test.json new file mode 100644 index 0000000..e6e6f03 --- /dev/null +++ 
b/pkg/leakybucket/tests/simple-counter/test.json @@ -0,0 +1,46 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert": { + "scenario": "test/simple-counter", + "events_count": 2 + } + } + } + ] +} diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml new file mode 100644 index 0000000..ed199b2 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +blackhole: 1m +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-blackhole/test.json b/pkg/leakybucket/tests/simple-leaky-blackhole/test.json new file mode 100644 index 0000000..af33b21 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-blackhole/test.json @@ -0,0 +1,123 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:04+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "2" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:15+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "3" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:16+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE5 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:01:15+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "5" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE6 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:01:16+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "entry": "6" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + + } + } + }, + { + "Alert": { + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + + } + } + } + ] +} + diff --git 
a/pkg/leakybucket/tests/simple-leaky-cancel_on/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-cancel_on/bucket.yaml new file mode 100644 index 0000000..f2d3716 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-cancel_on/bucket.yaml @@ -0,0 +1,13 @@ +type: leaky +debug: true +name: test/simple-leaky-cancel +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +cancel_on: evt.Parsed.random_value == '42' +leakspeed: "10s" +blackhole: 1m +capacity: 1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-cancel_on/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-cancel_on/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-cancel_on/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-cancel_on/test.json b/pkg/leakybucket/tests/simple-leaky-cancel_on/test.json new file mode 100644 index 0000000..1e3023f --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-cancel_on/test.json @@ -0,0 +1,117 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + }, + "Parsed": { + "random_value" : "41" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "1.2.3.4" + }, + "Parsed": { + "random_value" : "42" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + }, + "Parsed": { + "random_value" : "41" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "2.2.3.4" + }, + "Parsed": { + "random_value" : "41" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "2.2.3.4" + }, + "Parsed": { + "random_value" : "41" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "2.2.3.4" + }, + "Parsed": { + "random_value" : "41" + } + } + ], + "results": [ + { + "Alert": { + } + }, + { + "Alert": { + "sources": { + "2.2.3.4": { + "scope": "Ip", + "value": "2.2.3.4", + "ip": "2.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky-cancel", + "events_count": 2 + } + } + } + + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml new file mode 100644 index 0000000..8c94d2c --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/bucket.yaml @@ -0,0 +1,11 @@ +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ 
b/pkg/leakybucket/tests/simple-leaky-overflow/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-overflow/test.json b/pkg/leakybucket/tests/simple-leaky-overflow/test.json new file mode 100644 index 0000000..2d3f813 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-overflow/test.json @@ -0,0 +1,46 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + } + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml new file mode 100644 index 0000000..f340c78 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/bucket.yaml @@ -0,0 +1,27 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/filter-discard +description: "ko" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +overflow_filter: any(queue.Queue, { Atof(.Meta.specvalue) > 3}) +#overflow_filter: Atof() +groupby: evt.Meta.source_ip +labels: + type: overflow_1 +--- +# ssh bruteforce +type: leaky +debug: true +name: test/filter-ok +description: "ok" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 1 +overflow_filter: any(queue.Queue, { Atof(.Meta.specvalue) > 1}) +#overflow_filter: Atof() +groupby: evt.Meta.source_ip +labels: + type: overflow_2 diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json new file mode 100644 index 0000000..0ec5dfa --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-ovflwfilter/test.json @@ -0,0 +1,54 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "specvalue": "1" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "specvalue": "2" + } + } + ], + "results": [ + { + "Alert": { + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/filter-ok", + "events_count": 2 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml new file mode 100644 index 0000000..5317666 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/bucket.yaml @@ -0,0 +1,12 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: 
"Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "0.5s" +capacity: 2 +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-underflow/test.json b/pkg/leakybucket/tests/simple-leaky-underflow/test.json new file mode 100644 index 0000000..20d383b --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-underflow/test.json @@ -0,0 +1,22 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + } + } + ] +} diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/bucket.yaml new file mode 100644 index 0000000..88bc2ae --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/bucket.yaml @@ -0,0 +1,14 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "20s" +capacity: 3 +cache_size: 1 +distinct: evt.Meta.uniq_key +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/test.json b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/test.json new file mode 100644 index 0000000..1619e4a --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-cachesize/test.json @@ -0,0 +1,194 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aac" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:02+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:02+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:03+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + }, + + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": 
"xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:03+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:04+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aab" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:04+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aac" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:05+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aab" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:06+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aac" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:06+00:00", + "Meta": { + "source_ip": "1.2.3.5", + "uniq_key": "aad" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.5": { + "scope": "Ip", + "value": "1.2.3.5", + + "ip": "1.2.3.5" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 4 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml new file mode 100644 index 0000000..4fca336 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" +capacity: 3 +distinct: evt.Meta.uniq_key +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json new file mode 100644 index 0000000..dee5f24 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/in-buckets_state.json @@ -0,0 +1,113 @@ +{ + "cdf58e6ae48e79ac3ae0f006e1a2e627eccd8b63": { + "Name": "test/simple-leaky", + "Mode": 1, + "SerializedState": { + "Limit": 0.1, + "Burst": 3, + "Tokens": 1.1, + "Last": "2020-01-01T10:00:05Z", + "LastEvent": "2020-01-01T10:00:05Z" + }, + "Queue": { + "Queue": [ + { + "Type": 0, + "ExpectMode": 1, + "Whitelisted": false, + "Stage": "", + "Overflow": { + "MapKey": "", + "start_at": "0001-01-01T00:00:00Z", + "stop_at": "0001-01-01T00:00:00Z", + "source": null, + "Source_ip": "", + "Source_range": "", + "Source_AutonomousSystemNumber": "", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "", + "Source_Latitude": 0, + "Source_Longitude": 0, + "Reprocess": false, + "Labels": null + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "2020-01-01T10:00:04Z", + "Process": false, + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Type": 0, + "ExpectMode": 1, + "Whitelisted": false, + "Stage": "", + "Overflow": { 
+ "MapKey": "", + "start_at": "0001-01-01T00:00:00Z", + "stop_at": "0001-01-01T00:00:00Z", + "source": null, + "Source_ip": "", + "Source_range": "", + "Source_AutonomousSystemNumber": "", + "Source_AutonomousSystemOrganization": "", + "Source_Country": "", + "Source_Latitude": 0, + "Source_Longitude": 0, + "Reprocess": false, + "Labels": null + }, + "Time": "0001-01-01T00:00:00Z", + "StrTime": "", + "MarshaledTime": "2020-01-01T10:00:05Z", + "Process": false, + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + } + ], + "L": 3 + }, + "Capacity": 3, + "CacheSize": 0, + "Mapkey": "cdf58e6ae48e79ac3ae0f006e1a2e627eccd8b63", + "Reprocess": false, + "Uuid": "dark-bush", + "First_ts": "2020-01-01T10:00:04Z", + "Last_ts": "2020-01-01T10:00:05Z", + "Ovflw_ts": "0001-01-01T00:00:00Z", + "Total_count": 2, + "Leakspeed": 10000000000, + "BucketConfig": { + "FormatVersion": "1.0", + "Author": "", + "Description": "Simple leaky", + "References": null, + "Type": "leaky", + "Name": "test/simple-leaky", + "Capacity": 3, + "LeakSpeed": "10s", + "Duration": "", + "Filter": "evt.Line.Labels.type =='testlog'", + "GroupBy": "evt.Meta.source_ip", + "Distinct": "evt.Meta.uniq_key", + "Debug": true, + "Labels": { + "type": "overflow_1" + }, + "Blackhole": "", + "Reprocess": false, + "CacheSize": 0, + "Profiling": false, + "OverflowFilter": "", + "BucketName": "lingering-river", + "Filename": "/home/bui/github/crowdsec/config/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/bucket.yaml" + }, + "Duration": 40000000000, + "Profiling": false + } +} \ No newline at end of file diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json new file mode 100644 index 0000000..e016898 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq-w-buckets_state/test.json @@ -0,0 +1,63 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:06+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "baa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE3 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:07+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "baa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE4 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:08+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "bab" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 4 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml new file mode 100644 index 0000000..b9692e1 --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/bucket.yaml @@ -0,0 +1,13 @@ +# ssh bruteforce +type: leaky +debug: true +name: test/simple-leaky +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +leakspeed: "10s" 
+capacity: 1 +distinct: evt.Meta.uniq_key +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml b/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-leaky-uniq/test.json b/pkg/leakybucket/tests/simple-leaky-uniq/test.json new file mode 100644 index 0000000..07a89bb --- /dev/null +++ b/pkg/leakybucket/tests/simple-leaky-uniq/test.json @@ -0,0 +1,63 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:01+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aaa" + } + }, + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:02+00:00", + "Meta": { + "source_ip": "1.2.3.4", + "uniq_key": "aab" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-leaky", + "events_count": 2 + } + + } + } + ] +} + diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml b/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml new file mode 100644 index 0000000..aca5219 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-external-data/bucket.yaml @@ -0,0 +1,13 @@ +type: trigger +debug: true +name: test/simple-trigger +data: + - source_url: https://invalid.com/test.list + dest_file: simple-trigger-external-data/simple_patterns.txt + type: string +description: "Simple trigger with external data" +filter: "evt.Line.Labels.type =='testlog' && evt.Parsed.tainted_data in File('simple-trigger-external-data/simple_patterns.txt')" +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/scenarios.yaml b/pkg/leakybucket/tests/simple-trigger-external-data/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-external-data/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/simple_patterns.txt b/pkg/leakybucket/tests/simple-trigger-external-data/simple_patterns.txt new file mode 100644 index 0000000..1845980 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-external-data/simple_patterns.txt @@ -0,0 +1,3 @@ +BBBBBBBBBBB11111XXX +AAAABBBBBBB11111XXX +CCCCCCCCCC11111XXX diff --git a/pkg/leakybucket/tests/simple-trigger-external-data/test.json b/pkg/leakybucket/tests/simple-trigger-external-data/test.json new file mode 100644 index 0000000..6261fe6 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-external-data/test.json @@ -0,0 +1,55 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + }, + "Parsed": { + "tainted_data": "AAAABBBBBBB11111XXX" + } + }, + { + "Line": { + "Labels": { + 
"type": "testlog" + }, + "Raw": "xxheader VALUE2 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.5" + }, + "Parsed": { + "tainted_data": "ZZZBBBBBBB11111XXX" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger", + "events_count": 1 + } + + } + } + ] +} + + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml new file mode 100644 index 0000000..b4759da --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/bucket.yaml @@ -0,0 +1,10 @@ +type: trigger +debug: true +name: test/simple-trigger-reprocess +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +groupby: evt.Meta.source_ip +reprocess: true +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml new file mode 100644 index 0000000..2ebef8f --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/reprocess.yaml @@ -0,0 +1,9 @@ +type: trigger +debug: true +name: test/simple-postoverflow-scenario +description: "Simple post overflow" +#filter: true +filter: "evt.Overflow.Alert != nil && evt.Overflow.Alert.Scenario != nil" +labels: + type: overflow_2 + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml b/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml new file mode 100644 index 0000000..a6c56d3 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/scenarios.yaml @@ -0,0 +1,3 @@ + - filename: {{.TestDirectory}}/bucket.yaml + - filename: {{.TestDirectory}}/reprocess.yaml + diff --git a/pkg/leakybucket/tests/simple-trigger-reprocess/test.json b/pkg/leakybucket/tests/simple-trigger-reprocess/test.json new file mode 100644 index 0000000..696821a --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger-reprocess/test.json @@ -0,0 +1,52 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger-reprocess", + "events_count": 1 + } + + } + }, + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-postoverflow-scenario", + "events_count": 1 + } + + } + } + ] +} diff --git a/pkg/leakybucket/tests/simple-trigger/bucket.yaml b/pkg/leakybucket/tests/simple-trigger/bucket.yaml new file mode 100644 index 0000000..dcccab7 --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/bucket.yaml @@ -0,0 +1,9 @@ +type: trigger +debug: true +name: test/simple-trigger +description: "Simple leaky" +filter: "evt.Line.Labels.type =='testlog'" +groupby: evt.Meta.source_ip +labels: + type: overflow_1 + diff --git a/pkg/leakybucket/tests/simple-trigger/scenarios.yaml b/pkg/leakybucket/tests/simple-trigger/scenarios.yaml new file mode 100644 index 0000000..f45f7be --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/scenarios.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/bucket.yaml + diff --git a/pkg/leakybucket/tests/simple-trigger/test.json 
b/pkg/leakybucket/tests/simple-trigger/test.json new file mode 100644 index 0000000..3b100fd --- /dev/null +++ b/pkg/leakybucket/tests/simple-trigger/test.json @@ -0,0 +1,35 @@ +{ + "lines": [ + { + "Line": { + "Labels": { + "type": "testlog" + }, + "Raw": "xxheader VALUE1 trailing stuff" + }, + "MarshaledTime": "2020-01-01T10:00:00+00:00", + "Meta": { + "source_ip": "1.2.3.4" + } + } + ], + "results": [ + { + "Alert": { + "sources": { + "1.2.3.4": { + "scope": "Ip", + "value": "1.2.3.4", + + "ip": "1.2.3.4" + } + }, + "Alert" : { + "scenario": "test/simple-trigger", + "events_count": 1 + } + } + } + ] +} + diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go new file mode 100644 index 0000000..a2cac47 --- /dev/null +++ b/pkg/leakybucket/timemachine.go @@ -0,0 +1,53 @@ +package leakybucket + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" +) + +func TimeMachinePour(l *Leaky, msg types.Event) { + var ( + d time.Time + err error + ) + if msg.MarshaledTime == "" { + log.Warningf("Trying to time-machine event without timestamp : %s", spew.Sdump(msg)) + return + } + + err = d.UnmarshalText([]byte(msg.MarshaledTime)) + if err != nil { + log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + return + } + + l.Total_count += 1 + l.mutex.Lock() + if l.First_ts.IsZero() { + l.logger.Debugf("First event, bucket creation time : %s", d) + l.First_ts = d + } + l.Last_ts = d + l.mutex.Unlock() + + if l.Limiter.AllowN(d, 1) { + l.logger.Tracef("Time-Pouring event %s (tokens:%f)", d, l.Limiter.GetTokensCount()) + l.Queue.Add(msg) + } else { + l.Ovflw_ts = d + l.logger.Debugf("Bucket overflow at %s", l.Ovflw_ts) + l.Queue.Add(msg) + l.Out <- l.Queue + } +} + +func NewTimeMachine(g BucketFactory) *Leaky { + l := NewLeaky(g) + g.logger.Tracef("Instantiating timeMachine bucket") + l.Pour = TimeMachinePour + l.Mode = TIMEMACHINE + return l +} diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go new file mode 100644 index 0000000..2c8b1ee --- /dev/null +++ b/pkg/leakybucket/trigger.go @@ -0,0 +1,42 @@ +package leakybucket + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +type Trigger struct { + DumbProcessor +} + +func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *types.Event { + // Pour makes the bucket overflow all the time + // TriggerPour unconditionally overflows + return func(msg types.Event, l *Leaky) *types.Event { + if l.Mode == TIMEMACHINE { + var d time.Time + err := d.UnmarshalText([]byte(msg.MarshaledTime)) + if err != nil { + log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + d = time.Now().UTC() + } + l.logger.Debugf("yay timemachine overflow time : %s --> %s", d, msg.MarshaledTime) + l.Last_ts = d + l.First_ts = d + l.Ovflw_ts = d + } else { + l.Last_ts = time.Now().UTC() + l.First_ts = time.Now().UTC() + l.Ovflw_ts = time.Now().UTC() + } + l.Total_count = 1 + + l.logger.Infof("Bucket overflow") + l.Queue.Add(msg) + l.Out <- l.Queue + + return nil + } +} diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go new file mode 100644 index 0000000..1e0cdde --- /dev/null +++ b/pkg/leakybucket/uniq.go @@ -0,0 +1,92 @@ +package leakybucket + +import ( + "sync" + + "github.com/antonmedv/expr" + "github.com/antonmedv/expr/vm" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + 
"github.com/crowdsecurity/crowdsec/pkg/types" +) + +// Uniq creates three new functions that share the same initialisation and the same scope. +// They are triggered respectively: +// on pour +// on overflow +// on leak + +var uniqExprCache map[string]vm.Program +var uniqExprCacheLock sync.Mutex + +type Uniq struct { + DistinctCompiled *vm.Program + KeyCache map[string]bool + CacheMutex sync.Mutex +} + +func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { + return func(msg types.Event, leaky *Leaky) *types.Event { + element, err := getElement(msg, u.DistinctCompiled) + if err != nil { + leaky.logger.Errorf("Uniq filter exec failed : %v", err) + return &msg + } + leaky.logger.Tracef("Uniq '%s' -> '%s'", bucketFactory.Distinct, element) + u.CacheMutex.Lock() + defer u.CacheMutex.Unlock() + if _, ok := u.KeyCache[element]; !ok { + leaky.logger.Debugf("Uniq(%s) : ok", element) + u.KeyCache[element] = true + return &msg + + } else { + leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) + return nil + } + } +} + +func (u *Uniq) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, types.RuntimeAlert, *Queue) (types.RuntimeAlert, *Queue) { + return func(leaky *Leaky, alert types.RuntimeAlert, queue *Queue) (types.RuntimeAlert, *Queue) { + return alert, queue + } +} + +func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { + var err error + var compiledExpr *vm.Program + + if uniqExprCache == nil { + uniqExprCache = make(map[string]vm.Program) + } + + uniqExprCacheLock.Lock() + if compiled, ok := uniqExprCache[bucketFactory.Distinct]; ok { + uniqExprCacheLock.Unlock() + u.DistinctCompiled = &compiled + } else { + uniqExprCacheLock.Unlock() + //release the lock during compile + compiledExpr, err = expr.Compile(bucketFactory.Distinct, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + u.DistinctCompiled = compiledExpr + uniqExprCacheLock.Lock() + uniqExprCache[bucketFactory.Distinct] = *compiledExpr + uniqExprCacheLock.Unlock() + } + u.KeyCache = make(map[string]bool) + return err +} + +// getElement computes a string from an event and a filter +func getElement(msg types.Event, cFilter *vm.Program) (string, error) { + el, err := expr.Run(cFilter, exprhelpers.GetExprEnv(map[string]interface{}{"evt": &msg})) + if err != nil { + return "", err + } + element, ok := el.(string) + if !ok { + return "", err + } + return element, nil +} diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go new file mode 100644 index 0000000..4e33b93 --- /dev/null +++ b/pkg/metabase/api.go @@ -0,0 +1,84 @@ +package metabase + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/dghubble/sling" + log "github.com/sirupsen/logrus" +) + +type APIClient struct { + CTX *sling.Sling + Client *http.Client +} + +const ( + sessionEndpoint = "login" + scanEndpoint = "scan" + resetPasswordEndpoint = "reset_password" + userEndpoint = "user" + databaseEndpoint = "database" +) + +var ( + routes = map[string]string{ + sessionEndpoint: "api/session", + scanEndpoint: "api/database/2/rescan_values", + resetPasswordEndpoint: "api/user/1/password", + userEndpoint: "api/user/1", + databaseEndpoint: "api/database/2", + } +) + +func NewAPIClient(url string) (*APIClient, error) { + httpClient := &http.Client{Timeout: 20 * time.Second} + return &APIClient{ + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", fmt.Sprintf("crowdsec/%s", cwversion.VersionStr())), + 
Client: httpClient, + }, nil +} + +func (h *APIClient) Do(method string, route string, body interface{}) (interface{}, interface{}, error) { + var Success interface{} + var Error interface{} + var resp *http.Response + var err error + var data []byte + if body != nil { + data, _ = json.Marshal(body) + } + + switch method { + case "POST": + log.Debugf("POST /%s", route) + log.Debugf("%s", string(data)) + resp, err = h.CTX.New().Post(route).BodyJSON(body).Receive(&Success, &Error) + case "GET": + log.Debugf("GET /%s", route) + resp, err = h.CTX.New().Get(route).Receive(&Success, &Error) + case "PUT": + log.Debugf("PUT /%s", route) + log.Debugf("%s", string(data)) + resp, err = h.CTX.New().Put(route).BodyJSON(body).Receive(&Success, &Error) + case "DELETE": + default: + return nil, nil, fmt.Errorf("unsupported method '%s'", method) + } + if Error != nil { + return Success, Error, fmt.Errorf("http error: %v", Error) + } + + if resp != nil && resp.StatusCode != 200 && resp.StatusCode != 202 { + return Success, Error, fmt.Errorf("bad status code '%d': (success: %+v) | (error: %+v)", resp.StatusCode, Success, Error) + } + return Success, Error, err +} + +// Set set headers as key:value +func (h *APIClient) Set(key string, value string) { + h.CTX = h.CTX.Set(key, value) +} diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go new file mode 100644 index 0000000..76788b0 --- /dev/null +++ b/pkg/metabase/container.go @@ -0,0 +1,186 @@ +package metabase + +import ( + "bufio" + "context" + "fmt" + "runtime" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + log "github.com/sirupsen/logrus" +) + +type Container struct { + ListenAddr string + ListenPort string + SharedFolder string + Image string + Name string + ID string + CLI *client.Client + MBDBUri string + DockerGroupID string +} + +func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string) (*Container, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, fmt.Errorf("failed to create docker client : %s", err) + } + return &Container{ + ListenAddr: listenAddr, + ListenPort: listenPort, + SharedFolder: sharedFolder, + Image: image, + Name: containerName, + CLI: cli, + MBDBUri: mbDBURI, + DockerGroupID: dockerGroupID, + }, nil +} + +func (c *Container) Create() error { + ctx := context.Background() + log.Printf("Pulling docker image %s", c.Image) + reader, err := c.CLI.ImagePull(ctx, c.Image, types.ImagePullOptions{}) + if err != nil { + return fmt.Errorf("failed to pull docker image : %s", err) + } + defer reader.Close() + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + fmt.Print(".") + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("failed to read imagepull reader: %s", err) + } + fmt.Print("\n") + + hostConfig := &container.HostConfig{ + PortBindings: nat.PortMap{ + "3000/tcp": []nat.PortBinding{ + { + HostIP: c.ListenAddr, + HostPort: c.ListenPort, + }, + }, + }, + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: c.SharedFolder, + Target: containerSharedFolder, + }, + }, + } + + env := []string{ + fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder), + } + if c.MBDBUri != "" { + env = append(env, c.MBDBUri) + } + + env = append(env, 
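+ // MGID carries the configured docker group id; presumably it is read by
+ // the container entrypoint to fix permissions on the shared folder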
fmt.Sprintf("MGID=%s", c.DockerGroupID)) + dockerConfig := &container.Config{ + Image: c.Image, + Tty: true, + Env: env, + } + os := runtime.GOOS + switch os { + case "linux": + case "windows", "darwin": + return fmt.Errorf("Mac and Windows are not supported yet") + default: + return fmt.Errorf("OS '%s' is not supported", os) + } + + log.Infof("creating container '%s'", c.Name) + resp, err := c.CLI.ContainerCreate(ctx, dockerConfig, hostConfig, nil, nil, c.Name) + if err != nil { + return fmt.Errorf("failed to create container : %s", err) + } + c.ID = resp.ID + + return nil +} + +func (c *Container) Start() error { + ctx := context.Background() + if err := c.CLI.ContainerStart(ctx, c.Name, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("failed while starting %s : %s", c.ID, err) + } + + return nil +} + +func StartContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + if err := cli.ContainerStart(ctx, name, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("failed while starting %s : %s", name, err) + } + + return nil +} + +func StopContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + var to time.Duration = 20 * time.Second + if err := cli.ContainerStop(ctx, name, &to); err != nil { + return fmt.Errorf("failed while stopping %s : %s", name, err) + } + log.Printf("container stopped successfully") + return nil +} + +func RemoveContainer(name string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + log.Printf("Removing docker metabase %s", name) + if err := cli.ContainerRemove(ctx, name, types.ContainerRemoveOptions{}); err != nil { + return fmt.Errorf("failed remove container %s : %s", name, err) + } + return nil +} + +func RemoveImageContainer() error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + ctx := context.Background() + log.Printf("Removing docker image '%s'", metabaseImage) + if _, err := cli.ImageRemove(ctx, metabaseImage, types.ImageRemoveOptions{}); err != nil { + return fmt.Errorf("failed remove image container %s : %s", metabaseImage, err) + } + return nil +} + +func IsContainerExist(name string) bool { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + log.Fatalf("failed to create docker client : %s", err) + } + ctx := context.Background() + if _, err := cli.ContainerInspect(ctx, name); err != nil { + return false + } + return true +} diff --git a/pkg/metabase/database.go b/pkg/metabase/database.go new file mode 100644 index 0000000..0a7890f --- /dev/null +++ b/pkg/metabase/database.go @@ -0,0 +1,101 @@ +package metabase + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/pkg/errors" +) + +type Database struct { + DBUrl string + Model *Model + Config *csconfig.DatabaseCfg + Client *APIClient + Details *Details + // in case mysql host is 
127.0.0.1 the ip address of mysql/pgsql host will be the docker gateway since metabase run in a container +} + +type Details struct { + Db string `json:"db"` + Host string `json:"host"` + Port int `json:"port"` + Dbname string `json:"dbname"` + User string `json:"user"` + Password string `json:"password"` + Ssl bool `json:"ssl"` + AdditionalOptions interface{} `json:"additional-options"` + TunnelEnabled bool `json:"tunnel-enabled"` +} + +type Model struct { + Engine string `json:"engine"` + Name string `json:"name"` + Details *Details `json:"details"` + AutoRunQueries bool `json:"auto_run_queries"` + IsFullSync bool `json:"is_full_sync"` + IsOnDemand bool `json:"is_on_demand"` + Schedules map[string]interface{} `json:"schedules"` +} + +func NewDatabase(config *csconfig.DatabaseCfg, client *APIClient, remoteDBAddr string) (*Database, error) { + var details *Details + + database := Database{} + + switch config.Type { + case "mysql": + return nil, fmt.Errorf("database '%s' is not supported yet", config.Type) + case "sqlite": + database.DBUrl = metabaseSQLiteDBURL + localFolder := filepath.Dir(config.DbPath) + // replace /var/lib/crowdsec/data/ with /metabase-data/ + dbPath := strings.Replace(config.DbPath, localFolder, containerSharedFolder, 1) + details = &Details{ + Db: dbPath, + } + case "postgresql", "postgres", "pgsql": + return nil, fmt.Errorf("database '%s' is not supported yet", config.Type) + default: + return nil, fmt.Errorf("database '%s' not supported", config.Type) + } + database.Details = details + database.Client = client + database.Config = config + + return &database, nil +} + +func (d *Database) Update() error { + success, errormsg, err := d.Client.Do("GET", routes[databaseEndpoint], nil) + if err != nil { + return err + } + if errormsg != nil { + return fmt.Errorf("update sqlite db http error: %+v", errormsg) + } + + data, err := json.Marshal(success) + if err != nil { + return errors.Wrap(err, "update sqlite db response (marshal)") + } + + model := Model{} + + if err := json.Unmarshal(data, &model); err != nil { + return errors.Wrap(err, "update sqlite db response (unmarshal)") + } + model.Details = d.Details + _, errormsg, err = d.Client.Do("PUT", routes[databaseEndpoint], model) + if err != nil { + return err + } + if errormsg != nil { + return fmt.Errorf("update sqlite db http error: %+v", errormsg) + } + + return nil +} diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go new file mode 100644 index 0000000..e2f7e0e --- /dev/null +++ b/pkg/metabase/metabase.go @@ -0,0 +1,383 @@ +package metabase + +import ( + "archive/zip" + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/docker/docker/client" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +type Metabase struct { + Config *Config + Client *APIClient + Container *Container + Database *Database + InternalDBURL string +} + +type Config struct { + Database *csconfig.DatabaseCfg `yaml:"database"` + ListenAddr string `yaml:"listen_addr"` + ListenPort string `yaml:"listen_port"` + ListenURL string `yaml:"listen_url"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DBPath string `yaml:"metabase_db_path"` + DockerGroupID string `yaml:"-"` +} + +var ( + metabaseDefaultUser = "crowdsec@crowdsec.net" + metabaseDefaultPassword = "!!Cr0wdS3c_M3t4b4s3??" 
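+ // the default user/password above are only used for the first login:
+ // SetupMetabase authenticates with them (see WaitAlive) and then rotates
+ // the password to the configured one through ResetCredentials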
+ metabaseImage = "metabase/metabase:v0.41.5" + containerSharedFolder = "/metabase-data" + metabaseSQLiteDBURL = "https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip" +) + +func TestAvailability() error { + if runtime.GOARCH != "amd64" { + return fmt.Errorf("cscli dashboard is only available on amd64, but you are running %s", runtime.GOARCH) + } + + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker client : %s", err) + } + + _, err = cli.Ping(context.TODO()) + return err + +} + +func (m *Metabase) Init(containerName string) error { + var err error + var DBConnectionURI string + var remoteDBAddr string + + switch m.Config.Database.Type { + case "mysql": + return fmt.Errorf("'mysql' is not supported yet for cscli dashboard") + //DBConnectionURI = fmt.Sprintf("MB_DB_CONNECTION_URI=mysql://%s:%d/%s?user=%s&password=%s&allowPublicKeyRetrieval=true", remoteDBAddr, m.Config.Database.Port, m.Config.Database.DbName, m.Config.Database.User, m.Config.Database.Password) + case "sqlite": + m.InternalDBURL = metabaseSQLiteDBURL + case "postgresql", "postgres", "pgsql": + return fmt.Errorf("'postgresql' is not supported yet by cscli dashboard") + default: + return fmt.Errorf("database '%s' not supported", m.Config.Database.Type) + } + + m.Client, err = NewAPIClient(m.Config.ListenURL) + if err != nil { + return err + } + m.Database, err = NewDatabase(m.Config.Database, m.Client, remoteDBAddr) + if err != nil { + return err + } + m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, metabaseImage, DBConnectionURI, m.Config.DockerGroupID) + if err != nil { + return errors.Wrap(err, "container init") + } + + return nil +} + +func NewMetabase(configPath string, containerName string) (*Metabase, error) { + m := &Metabase{} + if err := m.LoadConfig(configPath); err != nil { + return m, err + } + if err := m.Init(containerName); err != nil { + return m, err + } + return m, nil +} + +func (m *Metabase) LoadConfig(configPath string) error { + yamlFile, err := os.ReadFile(configPath) + if err != nil { + return err + } + + config := &Config{} + + err = yaml.Unmarshal(yamlFile, config) + if err != nil { + return err + } + if config.Username == "" { + return fmt.Errorf("'username' not found in configuration file '%s'", configPath) + } + + if config.Password == "" { + return fmt.Errorf("'password' not found in configuration file '%s'", configPath) + } + + if config.ListenURL == "" { + return fmt.Errorf("'listen_url' not found in configuration file '%s'", configPath) + } + + m.Config = config + + return nil + +} + +func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string) (*Metabase, error) { + metabase := &Metabase{ + Config: &Config{ + Database: dbConfig, + ListenAddr: listenAddr, + ListenPort: listenPort, + Username: username, + Password: password, + ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), + DBPath: mbDBPath, + DockerGroupID: dockerGroupID, + }, + } + if err := metabase.Init(containerName); err != nil { + return nil, errors.Wrap(err, "metabase setup init") + } + + if err := metabase.DownloadDatabase(false); err != nil { + return nil, errors.Wrap(err, "metabase db download") + } + + if err := metabase.Container.Create(); err != nil { + return nil, errors.Wrap(err, "container create") + } + + 
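+ // past this point the container exists; the remaining steps start it and
+ // drive metabase through its HTTP API: wait until it answers, point it at
+ // the crowdsec database, rescan field values, then replace the default
+ // credentials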
if err := metabase.Container.Start(); err != nil { + return nil, errors.Wrap(err, "container start") + } + + log.Infof("waiting for metabase to be up (can take up to a minute)") + if err := metabase.WaitAlive(); err != nil { + return nil, errors.Wrap(err, "wait alive") + } + + if err := metabase.Database.Update(); err != nil { + return nil, errors.Wrap(err, "update database") + } + + if err := metabase.Scan(); err != nil { + return nil, errors.Wrap(err, "db scan") + } + + if err := metabase.ResetCredentials(); err != nil { + return nil, errors.Wrap(err, "reset creds") + } + + return metabase, nil +} + +func (m *Metabase) WaitAlive() error { + var err error + for { + err = m.Login(metabaseDefaultUser, metabaseDefaultPassword) + if err != nil { + if strings.Contains(err.Error(), "password:did not match stored password") { + log.Errorf("Password mismatch error, is your dashboard already setup ? Run 'cscli dashboard remove' to reset it.") + return errors.Wrapf(err, "Password mismatch error") + } + log.Debugf("%+v", err) + } else { + break + } + + fmt.Printf(".") + time.Sleep(2 * time.Second) + } + fmt.Printf("\n") + return nil +} + +func (m *Metabase) Login(username string, password string) error { + body := map[string]string{"username": username, "password": password} + successmsg, errormsg, err := m.Client.Do("POST", routes[sessionEndpoint], body) + if err != nil { + return err + } + + if errormsg != nil { + return errors.Wrap(err, "http login") + } + resp, ok := successmsg.(map[string]interface{}) + if !ok { + return fmt.Errorf("login: bad response type: %+v", successmsg) + } + if _, ok = resp["id"]; !ok { + return fmt.Errorf("login: can't update session id, no id in response: %v", successmsg) + } + id, ok := resp["id"].(string) + if !ok { + return fmt.Errorf("login: bad id type: %+v", resp["id"]) + } + m.Client.Set("Cookie", fmt.Sprintf("metabase.SESSION=%s", id)) + return nil +} + +func (m *Metabase) Scan() error { + _, errormsg, err := m.Client.Do("POST", routes[scanEndpoint], nil) + if err != nil { + return err + } + if errormsg != nil { + return errors.Wrap(err, "http scan") + } + + return nil +} + +func (m *Metabase) ResetPassword(current string, newPassword string) error { + body := map[string]string{ + "id": "1", + "password": newPassword, + "old_password": current, + } + _, errormsg, err := m.Client.Do("PUT", routes[resetPasswordEndpoint], body) + if err != nil { + return errors.Wrap(err, "reset username") + } + if errormsg != nil { + return errors.Wrap(err, "http reset password") + } + return nil +} + +func (m *Metabase) ResetUsername(username string) error { + body := struct { + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Email string `json:"email"` + GroupIDs []int `json:"group_ids"` + }{ + FirstName: "Crowdsec", + LastName: "Crowdsec", + Email: username, + GroupIDs: []int{1, 2}, + } + + _, errormsg, err := m.Client.Do("PUT", routes[userEndpoint], body) + if err != nil { + return errors.Wrap(err, "reset username") + } + + if errormsg != nil { + return errors.Wrap(err, "http reset username") + } + + return nil +} + +func (m *Metabase) ResetCredentials() error { + if err := m.ResetPassword(metabaseDefaultPassword, m.Config.Password); err != nil { + return err + } + + /*if err := m.ResetUsername(m.Config.Username); err != nil { + return err + }*/ + + return nil +} + +func (m *Metabase) DumpConfig(path string) error { + data, err := yaml.Marshal(m.Config) + if err != nil { + return err + } + return os.WriteFile(path, data, 0600) +} + +func (m 
*Metabase) DownloadDatabase(force bool) error { + + metabaseDBSubpath := path.Join(m.Config.DBPath, "metabase.db") + _, err := os.Stat(metabaseDBSubpath) + if err == nil && !force { + log.Printf("%s exists, skip.", metabaseDBSubpath) + return nil + } + + if err := os.MkdirAll(metabaseDBSubpath, 0755); err != nil { + return fmt.Errorf("failed to create %s : %s", metabaseDBSubpath, err) + } + + req, err := http.NewRequest(http.MethodGet, m.InternalDBURL, nil) + if err != nil { + return fmt.Errorf("failed to build request to fetch metabase db : %s", err) + } + //This needs to be removed once we move the zip out of github + //req.Header.Add("Accept", `application/vnd.github.v3.raw`) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("failed request to fetch metabase db : %s", err) + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got http %d while requesting metabase db %s, stop", resp.StatusCode, m.InternalDBURL) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed request read while fetching metabase db : %s", err) + } + log.Debugf("Got %d bytes archive", len(body)) + + if err := m.ExtractDatabase(bytes.NewReader(body)); err != nil { + return fmt.Errorf("while extracting zip : %s", err) + } + return nil +} + +func (m *Metabase) ExtractDatabase(buf *bytes.Reader) error { + r, err := zip.NewReader(buf, int64(buf.Len())) + if err != nil { + return err + } + for _, f := range r.File { + if strings.Contains(f.Name, "..") { + return fmt.Errorf("invalid path '%s' in archive", f.Name) + } + tfname := fmt.Sprintf("%s/%s", m.Config.DBPath, f.Name) + log.Tracef("%s -> %d", f.Name, f.UncompressedSize64) + if f.UncompressedSize64 == 0 { + continue + } + tfd, err := os.OpenFile(tfname, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644) + if err != nil { + return fmt.Errorf("failed opening target file '%s' : %s", tfname, err) + } + rc, err := f.Open() + if err != nil { + return fmt.Errorf("while opening zip content %s : %s", f.Name, err) + } + written, err := io.Copy(tfd, rc) + if err == io.EOF { + log.Printf("files finished ok") + } else if err != nil { + return fmt.Errorf("while copying content to %s : %s", tfname, err) + } + log.Debugf("written %d bytes to %s", written, tfname) + rc.Close() + } + return nil +} + +func RemoveDatabase(dataDir string) error { + return os.RemoveAll(path.Join(dataDir, "metabase.db")) +} diff --git a/pkg/models/add_alerts_request.go b/pkg/models/add_alerts_request.go new file mode 100644 index 0000000..fd7246b --- /dev/null +++ b/pkg/models/add_alerts_request.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AddAlertsRequest AddAlertsRequest +// +// swagger:model AddAlertsRequest +type AddAlertsRequest []*Alert + +// Validate validates this add alerts request +func (m AddAlertsRequest) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this add alerts request based on the context it is used +func (m AddAlertsRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/add_alerts_response.go b/pkg/models/add_alerts_response.go new file mode 100644 index 0000000..cd8c98f --- /dev/null +++ b/pkg/models/add_alerts_response.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// AddAlertsResponse AddAlertsResponse +// +// swagger:model AddAlertsResponse +type AddAlertsResponse []string + +// Validate validates this add alerts response +func (m AddAlertsResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this add alerts response based on context it is used +func (m AddAlertsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/add_signals_request.go b/pkg/models/add_signals_request.go new file mode 100644 index 0000000..d9c2b36 --- /dev/null +++ b/pkg/models/add_signals_request.go @@ -0,0 +1,75 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AddSignalsRequest add signals request +// +// All signals request model +// +// swagger:model AddSignalsRequest +type AddSignalsRequest []*AddSignalsRequestItem + +// Validate validates this add signals request +func (m AddSignalsRequest) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this add signals request based on the context it is used +func (m AddSignalsRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/add_signals_request_item.go b/pkg/models/add_signals_request_item.go new file mode 100644 index 0000000..2a5acdd --- /dev/null +++ b/pkg/models/add_signals_request_item.go @@ -0,0 +1,232 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AddSignalsRequestItem Signal +// +// swagger:model AddSignalsRequestItem +type AddSignalsRequestItem struct { + + // created at + CreatedAt string `json:"created_at,omitempty"` + + // machine id + MachineID string `json:"machine_id,omitempty"` + + // a human readable message + // Required: true + Message *string `json:"message"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // scenario hash + // Required: true + ScenarioHash *string `json:"scenario_hash"` + + // scenario trust + // Required: true + ScenarioTrust *string `json:"scenario_trust"` + + // scenario version + // Required: true + ScenarioVersion *string `json:"scenario_version"` + + // source + // Required: true + Source *Source `json:"source"` + + // start at + // Required: true + StartAt *string `json:"start_at"` + + // stop at + // Required: true + StopAt *string `json:"stop_at"` +} + +// Validate validates this add signals request item +func (m *AddSignalsRequestItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioTrust(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStopAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AddSignalsRequestItem) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenarioHash(formats strfmt.Registry) error { + + if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenarioTrust(formats strfmt.Registry) error { + + if err := validate.Required("scenario_trust", "body", m.ScenarioTrust); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateScenarioVersion(formats strfmt.Registry) error { + + if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + if m.Source != nil { + if err := m.Source.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("source") + } + return err + } + } + + return nil +} + +func (m *AddSignalsRequestItem) validateStartAt(formats strfmt.Registry) error { + + if err := validate.Required("start_at", "body", m.StartAt); err != nil { + return err + } + + return nil +} + +func (m *AddSignalsRequestItem) validateStopAt(formats strfmt.Registry) error { + + if err := validate.Required("stop_at", "body", m.StopAt); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this add signals request item based on the context it is used +func (m *AddSignalsRequestItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateSource(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { + + if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("source") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AddSignalsRequestItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AddSignalsRequestItem) UnmarshalBinary(b []byte) error { + var res AddSignalsRequestItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/alert.go b/pkg/models/alert.go new file mode 100644 index 0000000..3d20fa6 --- /dev/null +++ b/pkg/models/alert.go @@ -0,0 +1,493 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Alert Alert +// +// swagger:model Alert +type Alert struct { + + // capacity + // Required: true + Capacity *int32 `json:"capacity"` + + // only relevant for GET, ignored in POST requests + // Read Only: true + CreatedAt string `json:"created_at,omitempty"` + + // decisions + Decisions []*Decision `json:"decisions"` + + // the Meta of the events leading to overflow + // Required: true + Events []*Event `json:"events"` + + // events count + // Required: true + EventsCount *int32 `json:"events_count"` + + // only relevant for GET, ignored in POST requests + // Read Only: true + ID int64 `json:"id,omitempty"` + + // labels + Labels []string `json:"labels"` + + // leakspeed + // Required: true + Leakspeed *string `json:"leakspeed"` + + // only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL + // Read Only: true + MachineID string `json:"machine_id,omitempty"` + + // a human readable message + // Required: true + Message *string `json:"message"` + + // meta + Meta Meta `json:"meta,omitempty"` + + // remediation + Remediation bool `json:"remediation,omitempty"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // scenario hash + // Required: true + ScenarioHash *string `json:"scenario_hash"` + + // scenario version + // Required: true + ScenarioVersion *string `json:"scenario_version"` + + // simulated + // Required: true + Simulated *bool `json:"simulated"` + + // source + // Required: true + Source *Source `json:"source"` + + // start at + // Required: true + StartAt *string `json:"start_at"` + + // stop at + // Required: true + StopAt *string `json:"stop_at"` +} + +// Validate validates this alert +func (m *Alert) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCapacity(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDecisions(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEvents(formats); err != nil { + res = append(res, err) + } + + if err := m.validateEventsCount(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLeakspeed(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenarioVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSimulated(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSource(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStopAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Alert) validateCapacity(formats strfmt.Registry) error { + + if err := validate.Required("capacity", "body", m.Capacity); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateDecisions(formats strfmt.Registry) error { + if swag.IsZero(m.Decisions) { // not required + return nil + } + + for i := 0; i < len(m.Decisions); i++ { + if swag.IsZero(m.Decisions[i]) { // not required + continue + } + + if m.Decisions[i] != nil { + if err := m.Decisions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("decisions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("decisions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) validateEvents(formats strfmt.Registry) error { + + if err := validate.Required("events", "body", m.Events); err != nil { + return err + } + + for i := 0; i < len(m.Events); i++ { + if swag.IsZero(m.Events[i]) { // not required + continue + } + + if m.Events[i] != nil { + if err := m.Events[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("events" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("events" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) validateEventsCount(formats strfmt.Registry) error { + + if err := validate.Required("events_count", "body", m.EventsCount); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateLeakspeed(formats strfmt.Registry) error { + + if err := validate.Required("leakspeed", "body", m.Leakspeed); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateMeta(formats strfmt.Registry) error { + if swag.IsZero(m.Meta) { // not required + return nil + } + + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + + return nil +} + +func (m *Alert) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateScenarioHash(formats strfmt.Registry) error { + + if err := validate.Required("scenario_hash", "body", m.ScenarioHash); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateScenarioVersion(formats strfmt.Registry) error { + + if err := validate.Required("scenario_version", "body", m.ScenarioVersion); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateSimulated(formats strfmt.Registry) error { + + if err := validate.Required("simulated", "body", m.Simulated); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateSource(formats strfmt.Registry) error { + + if err := validate.Required("source", "body", m.Source); err != nil { + return err + } + + if m.Source != nil { + if err := m.Source.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("source") + } + 
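+ // neither a field-level nor a composite validation error: propagate it
+ // unchanged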
return err + } + } + + return nil +} + +func (m *Alert) validateStartAt(formats strfmt.Registry) error { + + if err := validate.Required("start_at", "body", m.StartAt); err != nil { + return err + } + + return nil +} + +func (m *Alert) validateStopAt(formats strfmt.Registry) error { + + if err := validate.Required("stop_at", "body", m.StopAt); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this alert based on the context it is used +func (m *Alert) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateCreatedAt(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateDecisions(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateEvents(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateID(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMachineID(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSource(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Alert) contextValidateCreatedAt(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "created_at", "body", string(m.CreatedAt)); err != nil { + return err + } + + return nil +} + +func (m *Alert) contextValidateDecisions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Decisions); i++ { + + if m.Decisions[i] != nil { + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("decisions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("decisions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) contextValidateEvents(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Events); i++ { + + if m.Events[i] != nil { + if err := m.Events[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("events" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("events" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Alert) contextValidateID(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "id", "body", int64(m.ID)); err != nil { + return err + } + + return nil +} + +func (m *Alert) contextValidateMachineID(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "machine_id", "body", string(m.MachineID)); err != nil { + return err + } + + return nil +} + +func (m *Alert) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + + return nil +} + +func (m *Alert) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { + + if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("source") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("source") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Alert) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Alert) UnmarshalBinary(b []byte) error { + var res Alert + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/decision.go b/pkg/models/decision.go new file mode 100644 index 0000000..1a8d1b9 --- /dev/null +++ b/pkg/models/decision.go @@ -0,0 +1,198 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Decision Decision +// +// swagger:model Decision +type Decision struct { + + // the duration of the decisions + // Required: true + Duration *string `json:"duration"` + + // (only relevant for GET ops) the unique id + // Read Only: true + ID int64 `json:"id,omitempty"` + + // the origin of the decision : cscli, crowdsec + // Required: true + Origin *string `json:"origin"` + + // scenario + // Required: true + Scenario *string `json:"scenario"` + + // the scope of decision : does it apply to an IP, a range, a username, etc + // Required: true + Scope *string `json:"scope"` + + // true if the decision result from a scenario in simulation mode + // Read Only: true + Simulated *bool `json:"simulated,omitempty"` + + // the type of decision, might be 'ban', 'captcha' or something custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL. 
+ // Required: true + Type *string `json:"type"` + + // the date until the decisions must be active + Until string `json:"until,omitempty"` + + // the value of the decision scope : an IP, a range, a username, etc + // Required: true + Value *string `json:"value"` +} + +// Validate validates this decision +func (m *Decision) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDuration(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOrigin(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScenario(formats); err != nil { + res = append(res, err) + } + + if err := m.validateScope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Decision) validateDuration(formats strfmt.Registry) error { + + if err := validate.Required("duration", "body", m.Duration); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateOrigin(formats strfmt.Registry) error { + + if err := validate.Required("origin", "body", m.Origin); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateScenario(formats strfmt.Registry) error { + + if err := validate.Required("scenario", "body", m.Scenario); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateScope(formats strfmt.Registry) error { + + if err := validate.Required("scope", "body", m.Scope); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateType(formats strfmt.Registry) error { + + if err := validate.Required("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +func (m *Decision) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this decision based on the context it is used +func (m *Decision) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateID(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSimulated(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Decision) contextValidateID(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "id", "body", int64(m.ID)); err != nil { + return err + } + + return nil +} + +func (m *Decision) contextValidateSimulated(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "simulated", "body", m.Simulated); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Decision) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Decision) UnmarshalBinary(b []byte) error { + var res Decision + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/decisions_stream_response.go b/pkg/models/decisions_stream_response.go new file mode 100644 index 0000000..8ec0c69 --- /dev/null +++ b/pkg/models/decisions_stream_response.go @@ -0,0 +1,142 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DecisionsStreamResponse DecisionsStreamResponse +// +// swagger:model DecisionsStreamResponse +type DecisionsStreamResponse struct { + + // deleted + Deleted GetDecisionsResponse `json:"deleted,omitempty"` + + // new + New GetDecisionsResponse `json:"new,omitempty"` +} + +// Validate validates this decisions stream response +func (m *DecisionsStreamResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleted(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNew(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DecisionsStreamResponse) validateDeleted(formats strfmt.Registry) error { + if swag.IsZero(m.Deleted) { // not required + return nil + } + + if err := m.Deleted.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deleted") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deleted") + } + return err + } + + return nil +} + +func (m *DecisionsStreamResponse) validateNew(formats strfmt.Registry) error { + if swag.IsZero(m.New) { // not required + return nil + } + + if err := m.New.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("new") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("new") + } + return err + } + + return nil +} + +// ContextValidate validate this decisions stream response based on the context it is used +func (m *DecisionsStreamResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateDeleted(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateNew(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DecisionsStreamResponse) contextValidateDeleted(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Deleted.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deleted") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("deleted") + } + return err + } + + return nil +} + +func (m *DecisionsStreamResponse) contextValidateNew(ctx context.Context, formats strfmt.Registry) error { + + if err := m.New.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("new") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("new") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DecisionsStreamResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DecisionsStreamResponse) UnmarshalBinary(b []byte) error { + var res DecisionsStreamResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/delete_alerts_response.go b/pkg/models/delete_alerts_response.go new file mode 100644 index 0000000..67306e4 --- /dev/null +++ b/pkg/models/delete_alerts_response.go @@ -0,0 +1,50 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DeleteAlertsResponse DeleteAlertsResponse +// +// swagger:model DeleteAlertsResponse +type DeleteAlertsResponse struct { + + // number of deleted alerts + NbDeleted string `json:"nbDeleted,omitempty"` +} + +// Validate validates this delete alerts response +func (m *DeleteAlertsResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this delete alerts response based on context it is used +func (m *DeleteAlertsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DeleteAlertsResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DeleteAlertsResponse) UnmarshalBinary(b []byte) error { + var res DeleteAlertsResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/delete_decision_response.go b/pkg/models/delete_decision_response.go new file mode 100644 index 0000000..70423d6 --- /dev/null +++ b/pkg/models/delete_decision_response.go @@ -0,0 +1,50 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// DeleteDecisionResponse DeleteDecisionResponse +// +// swagger:model DeleteDecisionResponse +type DeleteDecisionResponse struct { + + // number of deleted decisions + NbDeleted string `json:"nbDeleted,omitempty"` +} + +// Validate validates this delete decision response +func (m *DeleteDecisionResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this delete decision response based on context it is used +func (m *DeleteDecisionResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *DeleteDecisionResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DeleteDecisionResponse) UnmarshalBinary(b []byte) error { + var res DeleteDecisionResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/error_response.go b/pkg/models/error_response.go new file mode 100644 index 0000000..db62a4d --- /dev/null +++ b/pkg/models/error_response.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ErrorResponse error response +// +// error response return by the API +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // more detail on individual errors + Errors string `json:"errors,omitempty"` + + // Error message + // Required: true + Message *string `json:"message"` +} + +// Validate validates this error response +func (m *ErrorResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ErrorResponse) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this error response based on context it is used +func (m *ErrorResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorResponse) UnmarshalBinary(b []byte) error { + var res ErrorResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/event.go b/pkg/models/event.go new file mode 100644 index 0000000..6e165eb --- /dev/null +++ b/pkg/models/event.go @@ -0,0 +1,120 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Event Event +// +// swagger:model Event +type Event struct { + + // meta + // Required: true + Meta Meta `json:"meta"` + + // timestamp + // Required: true + Timestamp *string `json:"timestamp"` +} + +// Validate validates this event +func (m *Event) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTimestamp(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Event) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + + return nil +} + +func (m *Event) validateTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("timestamp", "body", m.Timestamp); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this event based on the context it is used +func (m *Event) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Event) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Event) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Event) UnmarshalBinary(b []byte) error { + var res Event + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/flush_decision_response.go b/pkg/models/flush_decision_response.go new file mode 100644 index 0000000..b01dc35 --- /dev/null +++ b/pkg/models/flush_decision_response.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// FlushDecisionResponse FlushDecisionResponse +// +// swagger:model FlushDecisionResponse +type FlushDecisionResponse struct { + + // decision id + DecisionID string `json:"decision_id,omitempty"` +} + +// Validate validates this flush decision response +func (m *FlushDecisionResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *FlushDecisionResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *FlushDecisionResponse) UnmarshalBinary(b []byte) error { + var res FlushDecisionResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/get_alerts_response.go b/pkg/models/get_alerts_response.go new file mode 100644 index 0000000..41b9d5a --- /dev/null +++ b/pkg/models/get_alerts_response.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetAlertsResponse AlertsResponse +// +// swagger:model GetAlertsResponse +type GetAlertsResponse []*Alert + +// Validate validates this get alerts response +func (m GetAlertsResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this get alerts response based on the context it is used +func (m GetAlertsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/get_decisions_response.go b/pkg/models/get_decisions_response.go new file mode 100644 index 0000000..b65b950 --- /dev/null +++ b/pkg/models/get_decisions_response.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetDecisionsResponse GetDecisionsResponse +// +// swagger:model GetDecisionsResponse +type GetDecisionsResponse []*Decision + +// Validate validates this get decisions response +func (m GetDecisionsResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this get decisions response based on the context it is used +func (m GetDecisionsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/helpers.go b/pkg/models/helpers.go new file mode 100644 index 0000000..8c08255 --- /dev/null +++ b/pkg/models/helpers.go @@ -0,0 +1,76 @@ +package models + +func (a *Alert) HasRemediation() bool { + return true +} + +func (a *Alert) GetScope() string { + if a.Source.Scope == nil { + return "" + } + return *a.Source.Scope +} + +func (a *Alert) GetValue() string { + if a.Source.Value == nil { + return "" + } + return *a.Source.Value +} + +func (a *Alert) GetScenario() string { + if a.Scenario == nil { + return "" + } + return *a.Scenario +} + +func (a *Alert) GetEventsCount() int32 { + if a.EventsCount == nil { + return 0 + } + return *a.EventsCount +} + +func (e *Event) GetMeta(key string) string { + for _, meta := range e.Meta { + if meta.Key == key { + return meta.Value + } + } + return "" +} + +func (a *Alert) GetMeta(key string) string { + for _, meta := range a.Meta { + if meta.Key == key { + return meta.Value + } + } + return "" +} + +func (s Source) GetValue() string { + if s.Value == nil { + return "" + } + return *s.Value +} + +func (s Source) GetScope() string { + if s.Scope == nil { + return "" + } + return *s.Scope +} + +func (s Source) GetAsNumberName() string { + ret := "" + if s.AsNumber != "0" { + ret += s.AsNumber + } + if s.AsName != "" { + ret += " " + s.AsName + } + return ret +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml new file mode 100644 index 0000000..9d3bacb --- /dev/null +++ b/pkg/models/localapi_swagger.yaml @@ -0,0 +1,1049 @@ +swagger: '2.0' +info: + version: 1.0.0 + title: Swagger CrowdSec + description: CrowdSec local API + contact: + email: contact@crowdsec.net +host: 127.0.0.1 +basePath: /v1 +securityDefinitions: + JWTAuthorizer: + type: "apiKey" + name: "Authorization: Bearer" + in: "header" + APIKeyAuthorizer: + type: "apiKey" + name: "X-Api-Key" + in: "header" +schemes: + - https + - http +consumes: + - application/json 
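+# Illustration: the two securityDefinitions above map to plain HTTP headers.
+# A bouncer consuming /decisions/stream authenticates with its API key, e.g.
+# (the host:port here is an assumption; only host and basePath are fixed by this spec):
+#
+#   curl -H 'X-Api-Key: <api_key>' \
+#        'http://127.0.0.1:8080/v1/decisions/stream?startup=true'
+#
+# Watcher endpoints instead expect an 'Authorization: Bearer <jwt>' header,
+# with the token obtained from POST /watchers/login.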
+produces: + - application/json +paths: + /decisions/stream: + get: + description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + summary: getDecisionsStream + tags: + - bouncers + operationId: getDecisionsStream + deprecated: false + produces: + - application/json + parameters: + - name: startup + in: query + required: false + type: boolean + description: 'If true, means that the bouncers is starting and a full list must be provided' + - name: scopes + in: query + required: false + type: string + description: 'Comma separated scopes of decisions to fetch' + - name: origins + in: query + required: false + type: string + description: 'Comma separated name of origins. If provided, then only the decisions originating from provided origins would be returned.' + - name: scenarios_containing + in: query + required: false + type: string + description: 'Comma separated words. If provided, only the decisions created by scenarios containing any of the provided word would be returned.' + - name: scenarios_not_containing + in: query + required: false + type: string + description: 'Comma separated words. If provided, only the decisions created by scenarios, not containing any of the provided word would be returned.' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DecisionsStreamResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] + head: + description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + summary: GetDecisionsStream + tags: + - bouncers + operationId: headDecisionsStream + deprecated: false + produces: + - application/json + parameters: + - name: startup + in: query + required: false + type: boolean + description: 'If true, means that the bouncer is starting and a full list must be provided' + responses: + '200': + description: successful operation + headers: {} + '400': + description: "400 response" + security: + - APIKeyAuthorizer: [] + /decisions: + get: + description: Returns information about existing decisions + summary: getDecisions + tags: + - bouncers + operationId: getDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) + - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: contains + in: query + required: false + type: boolean + description: indicate if you're looking for a decision that contains the value, or that is contained within the value + - name: origins + in: query + required: false + type: string + description: 'Comma separated name of origins. If provided, then only the decisions originating from provided origins would be returned.' + - name: scenarios_containing + in: query + required: false + type: string + description: 'Comma separated words. If provided, only the decisions created by scenarios containing any of the provided word would be returned.' 
+ - name: scenarios_not_containing + in: query + required: false + type: string + description: 'Comma separated words. If provided, only the decisions created by scenarios, not containing any of the provided word would be returned.' + responses: + '200': + description: "successful operation" + schema: + $ref: '#/definitions/GetDecisionsResponse' + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + head: + description: Returns information about existing decisions + summary: GetDecisions + tags: + - bouncers + operationId: headDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) + - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: contains + in: query + required: false + type: boolean + description: indicate if you're looking for a decision that contains the value, or that is contained within the value + responses: + '200': + description: "successful operation" + '400': + description: "400 response" + security: + - APIKeyAuthorizer: [] + delete: + description: Delete decisions(s) for given filters (only from cscli) + summary: deleteDecisions + tags: + - watchers + operationId: deleteDecisions + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: scope to which the decision applies (ie. IP/Range/Username/Session/...) 
+ - name: value + in: query + required: false + type: string + description: the value to match for in the specified scope + - name: type + in: query + required: false + type: string + description: type of decision + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: scenario + in: query + required: false + type: string + description: scenario to search + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteDecisionResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + '/decisions/{decision_id}': + delete: + description: Delete decision for given decision ID (only from cscli) + summary: DeleteDecision + tags: + - watchers + operationId: DeleteDecision + deprecated: false + produces: + - application/json + parameters: + - name: decision_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteDecisionResponse' + headers: {} + '404': + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + /watchers: + post: + description: This method is used when installing crowdsec (cscli->APIL) + summary: RegisterWatcher + tags: + - watchers + operationId: RegisterWatcher + deprecated: false + produces: + - application/json + consumes: + - application/json + parameters: + - name: body + in: body + required: true + description: Information about the watcher to be registered + schema: + $ref: '#/definitions/WatcherRegistrationRequest' + responses: + '201': + description: Watcher Created + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/login: + post: + description: Authenticate current to get session ID + summary: AuthenticateWatcher + tags: + - watchers + operationId: AuthenticateWatcher + deprecated: false + produces: + - application/json + consumes: + - application/json + parameters: + - name: body + in: body + required: true + description: Information about the watcher to be reset + schema: + $ref: '#/definitions/WatcherAuthRequest' + responses: + '200': + description: Login successful + schema: + $ref: '#/definitions/WatcherAuthResponse' + '403': + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + /alerts: + post: + description: Push alerts to API + summary: pushAlerts + tags: + - watchers + operationId: pushAlerts + deprecated: false + produces: + - application/json + consumes: + - application/json + parameters: + - name: body + in: body + required: true + description: Push alerts to the API + schema: + $ref: '#/definitions/AddAlertsRequest' + responses: + '201': + description: Alert(s) created + schema: + $ref: '#/definitions/AddAlertsResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + get: + description: Allows to search for alerts + summary: searchAlerts + tags: + - watchers + operationId: searchAlerts + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: show alerts for this scope + - name: value + in: query + required: false + type: 
string + description: show alerts for this value (used with scope) + - name: scenario + in: query + required: false + type: string + description: show alerts for this scenario + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: since #shouldn't "since" be a golang-duration format ? + in: query + required: false + type: string + format: date-time + description: 'search alerts newer than delay (format must be compatible with time.ParseDuration)' + - name: until #same as for "since" + in: query + description: 'search alerts older than delay (format must be compatible with time.ParseDuration)' + required: false + type: string + format: date-time + - name: simulated + in: query + required: false + type: boolean + description: if set to true, decisions in simulation mode will be returned as well + - name: has_active_decision + in: query + required: false + type: boolean + description: 'only return alerts with decisions not expired yet' + - name: decision_type + in: query + required: false + type: string + description: 'restrict results to alerts with decisions matching given type' + - name: limit + in: query + required: false + type: number + description: 'number of alerts to return' + - name: origin + in: query + required: false + type: string + description: 'restrict results to this origin (ie. lists,CAPI,cscli)' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/GetAlertsResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + head: + description: Allows to search for alerts + summary: searchAlerts + tags: + - watchers + operationId: headAlerts + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: show alerts for this scope + - name: value + in: query + required: false + type: string + description: show alerts for this value (used with scope) + - name: scenario + in: query + required: false + type: string + description: show alerts for this scenario + - name: ip + in: query + required: false + type: string + description: IP to search for (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: range to search for (shorthand for scope=range&value=) + - name: since #shouldn't "since" be a golang-duration format ? 
+ in: query + required: false + type: string + format: date-time + description: 'search alerts newer than delay (format must be compatible with time.ParseDuration)' + - name: until #same as for "since" + in: query + description: 'search alerts older than delay (format must be compatible with time.ParseDuration)' + required: false + type: string + format: date-time + - name: simulated + in: query + required: false + type: boolean + description: if set to true, decisions in simulation mode will be returned as well + - name: has_active_decision + in: query + required: false + type: boolean + description: 'only return alerts with decisions not expired yet' + - name: decision_type + in: query + required: false + type: string + description: 'restrict results to alerts with decisions matching given type' + - name: limit + in: query + required: false + type: number + description: 'number of alerts to return' + - name: origin + in: query + required: false + type: string + description: 'restrict results to this origin (ie. lists,CAPI,cscli)' + responses: + '200': + description: successful operation + headers: {} + '400': + description: "400 response" + security: + - JWTAuthorizer: [] + delete: + description: Allows to delete alerts + summary: deleteAlerts + tags: + - watchers + operationId: deleteAlerts + deprecated: false + produces: + - application/json + parameters: + - name: scope + in: query + required: false + type: string + description: delete alerts for this scope + - name: value + in: query + required: false + type: string + description: delete alerts for this value (used with scope) + - name: scenario + in: query + required: false + type: string + description: delete alerts for this scenario + - name: ip + in: query + required: false + type: string + description: delete Alerts with IP (shorthand for scope=ip&value=) + - name: range + in: query + required: false + type: string + description: delete alerts concerned by range (shorthand for scope=range&value=) + - name: since + in: query + required: false + type: string + format: date-time + description: 'delete alerts added after YYYY-mm-DD-HH:MM:SS' + - name: until + in: query + required: false + type: string + format: date-time + description: 'delete alerts added before YYYY-mm-DD-HH:MM:SS' + - name: has_active_decision + in: query + required: false + type: boolean + description: 'delete only alerts with decisions not expired yet' + - name: alert_source + in: query + required: false + type: string + description: delete only alerts with matching source (ie. 
cscli/crowdsec) + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteAlertsResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + '/alerts/{alert_id}': + get: + description: Get alert by ID + summary: GetAlertByID + tags: + - watchers + operationId: GetAlertbyID + deprecated: false + produces: + - application/json + parameters: + - name: alert_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/Alert' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] + head: + description: Get alert by ID + summary: GetAlertByID + tags: + - watchers + operationId: HeadAlertbyID + deprecated: false + produces: + - application/json + parameters: + - name: alert_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + headers: {} + '400': + description: "400 response" + security: + - JWTAuthorizer: [] + delete: + description: Delete alert for given alert ID (only from cscli) + summary: DeleteAlert + tags: + - watchers + operationId: DeleteAlert + deprecated: false + produces: + - application/json + parameters: + - name: alert_id + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/DeleteAlertsResponse' + headers: {} + '404': + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] +definitions: + WatcherRegistrationRequest: + title: WatcherRegistrationRequest + type: object + properties: + machine_id: + type: string + password: + type: string + format: password + required: + - machine_id + - password + WatcherAuthRequest: + title: WatcherAuthRequest + type: object + properties: + machine_id: + type: string + password: + type: string + format: password + scenarios: + description: the list of scenarios enabled on the watcher + type: array + items: + type: string + required: + - machine_id + - password + WatcherAuthResponse: + title: WatcherAuthResponse + description: the response of a successful authentication + type: object + properties: + code: + type: integer + expire: + type: string + token: + type: string + Alert: + title: Alert + type: object + properties: + id: + description: 'only relevant for GET, ignored in POST requests' + type: integer + readOnly: true + machine_id: + description: 'only relevant for APIL->APIC, ignored for cscli->APIL and crowdsec->APIL' + type: string + readOnly: true + created_at: + description: 'only relevant for GET, ignored in POST requests' + type: string + readOnly: true + scenario: + type: string + scenario_hash: + type: string + scenario_version: + type: string + message: + description: a human readable message + type: string + events_count: + type: integer + format: int32 + start_at: + type: string + stop_at: + type: string + capacity: + type: integer + format: int32 + leakspeed: + type: string + simulated: + type: boolean + events: + description: the Meta of the events leading to overflow + type: array + items: + $ref: '#/definitions/Event' + remediation: + type: boolean + decisions: + type: array + items: + $ref: '#/definitions/Decision' + source: + $ref: '#/definitions/Source' + meta: + $ref: '#/definitions/Meta' + labels: + type: 
array + items: + type: string + required: + - scenario + - scenario_hash + - scenario_version + - message + - events_count + - start_at + - stop_at + - capacity + - leakspeed + - simulated + - events + - source + Source: + title: Source + type: object + properties: + scope: + description: 'the scope of a source : ip,range,username,etc' + type: string + value: + description: 'the value of a source : the ip, the range, the username,etc' + type: string + ip: + description: provided as a convenience when the source is an IP + type: string + range: + description: provided as a convenience when the source is an IP + type: string + as_number: + description: provided as a convenience when the source is an IP + type: string + as_name: + description: provided as a convenience when the source is an IP + type: string + cn: + type: string + latitude: + type: number + format: float + longitude: + type: number + format: float + required: + - scope + - value + Metrics: + title: Metrics + type: object + properties: + apil_version: + description: the local version of crowdsec/apil + type: string + bouncers: + type: array + items: + $ref: '#/definitions/MetricsBouncerInfo' + machines: + type: array + items: + $ref: '#/definitions/MetricsAgentInfo' + required: + - apil_version + - bouncers + - machines + MetricsBouncerInfo: + title: MetricsBouncerInfo + description: Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent + type: object + properties: + custom_name: + type: string + description: name of the component + name: + type: string + description: bouncer type (firewall, php ...) + version: + type: string + description: software version + last_pull: + type: string + description: last bouncer pull date + MetricsAgentInfo: + title: MetricsAgentInfo + description: Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent + type: object + properties: + name: + type: string + description: name of the component + version: + type: string + description: software version + last_update: + type: string + description: last agent update date + last_push: + type: string + description: last agent push date + Decision: + title: Decision + type: object + properties: + id: + description: (only relevant for GET ops) the unique id + type: integer + readOnly: true + origin: + description: 'the origin of the decision : cscli, crowdsec' + type: string + type: + description: 'the type of decision, might be ''ban'', ''captcha'' or something custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL.' 
+ type: string + scope: + description: 'the scope of decision : does it apply to an IP, a range, a username, etc' + type: string + value: + description: 'the value of the decision scope : an IP, a range, a username, etc' + type: string + duration: + description: 'the duration of the decisions' + type: string + until: + type: string + description: 'the date until the decisions must be active' + scenario: + type: string + simulated: + type: boolean + description: 'true if the decision result from a scenario in simulation mode' + readOnly: true + required: + - origin + - type + - scope + - value + - duration + - scenario + DeleteDecisionResponse: + title: DeleteDecisionResponse + type: object + properties: + nbDeleted: + type: string + description: "number of deleted decisions" + AddAlertsRequest: + title: AddAlertsRequest + type: array + items: + $ref: '#/definitions/Alert' + AddAlertsResponse: + title: AddAlertsResponse + type: array + items: + type: string + description: alert_id + GetAlertsResponse: + title: AlertsResponse + type: array + items: + $ref: '#/definitions/Alert' + DeleteAlertsResponse: + title: DeleteAlertsResponse + type: object + properties: + nbDeleted: + type: string + description: "number of deleted alerts" + DecisionsStreamResponse: + title: DecisionsStreamResponse + type: object + properties: + new: + $ref: '#/definitions/GetDecisionsResponse' + deleted: + $ref: '#/definitions/GetDecisionsResponse' + Event: + title: Event + type: object + properties: + timestamp: + type: string + meta: + $ref: '#/definitions/Meta' + required: + - timestamp + - meta + GetDecisionsResponse: + title: GetDecisionsResponse + type: array + items: + $ref: '#/definitions/Decision' + Meta: + title: Meta + description: the Meta data of the Alert itself + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + ErrorResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "Error message" + errors: + type: "string" + description: "more detail on individual errors" + title: "error response" + description: "error response return by the API" + AddSignalsRequest: + title: "add signals request" + type: "array" + description: "All signals request model" + items: + $ref: "#/definitions/AddSignalsRequestItem" + AddSignalsRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + - "scenario_trust" + properties: + scenario_hash: + type: "string" + scenario: + type: "string" + created_at: + type: "string" + machine_id: + type: "string" + source: + $ref: "#/definitions/Source" + scenario_version: + type: "string" + scenario_trust: + type: "string" + message: + type: "string" + description: "a human readable message" + start_at: + type: "string" + stop_at: + type: "string" + title: "Signal" +tags: + - name: bouncers + description: 'Operations about decisions : bans, captcha, rate-limit etc.' + - name: watchers + description: 'Operations about watchers : cscli & crowdsec' +externalDocs: + url: 'https://github.com/crowdsecurity/crowdsec' + description: Find out more about CrowdSec diff --git a/pkg/models/meta.go b/pkg/models/meta.go new file mode 100644 index 0000000..6ad2085 --- /dev/null +++ b/pkg/models/meta.go @@ -0,0 +1,115 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Meta Meta +// +// the Meta data of the Alert itself +// +// swagger:model Meta +type Meta []*MetaItems0 + +// Validate validates this meta +func (m Meta) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this meta based on the context it is used +func (m Meta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MetaItems0 meta items0 +// +// swagger:model MetaItems0 +type MetaItems0 struct { + + // key + Key string `json:"key,omitempty"` + + // value + Value string `json:"value,omitempty"` +} + +// Validate validates this meta items0 +func (m *MetaItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this meta items0 based on context it is used +func (m *MetaItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetaItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetaItems0) UnmarshalBinary(b []byte) error { + var res MetaItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics.go b/pkg/models/metrics.go new file mode 100644 index 0000000..573678d --- /dev/null +++ b/pkg/models/metrics.go @@ -0,0 +1,195 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Metrics Metrics +// +// swagger:model Metrics +type Metrics struct { + + // the local version of crowdsec/apil + // Required: true + ApilVersion *string `json:"apil_version"` + + // bouncers + // Required: true + Bouncers []*MetricsBouncerInfo `json:"bouncers"` + + // machines + // Required: true + Machines []*MetricsAgentInfo `json:"machines"` +} + +// Validate validates this metrics +func (m *Metrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateApilVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateBouncers(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMachines(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Metrics) validateApilVersion(formats strfmt.Registry) error { + + if err := validate.Required("apil_version", "body", m.ApilVersion); err != nil { + return err + } + + return nil +} + +func (m *Metrics) validateBouncers(formats strfmt.Registry) error { + + if err := validate.Required("bouncers", "body", m.Bouncers); err != nil { + return err + } + + for i := 0; i < len(m.Bouncers); i++ { + if swag.IsZero(m.Bouncers[i]) { // not required + continue + } + + if m.Bouncers[i] != nil { + if err := m.Bouncers[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("bouncers" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Metrics) validateMachines(formats strfmt.Registry) error { + + if err := validate.Required("machines", "body", m.Machines); err != nil { + return err + } + + for i := 0; i < len(m.Machines); i++ { + if swag.IsZero(m.Machines[i]) { // not required + continue + } + + if m.Machines[i] != nil { + if err := m.Machines[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("machines" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("machines" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this metrics based on the context it is used +func (m *Metrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateBouncers(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMachines(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Metrics) contextValidateBouncers(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Bouncers); i++ { + + if m.Bouncers[i] != nil { + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("bouncers" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *Metrics) contextValidateMachines(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Machines); i++ { + + if m.Machines[i] != nil { + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("machines" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("machines" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Metrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Metrics) UnmarshalBinary(b []byte) error { + var res Metrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_agent_info.go b/pkg/models/metrics_agent_info.go new file mode 100644 index 0000000..27ffffc --- /dev/null +++ b/pkg/models/metrics_agent_info.go @@ -0,0 +1,61 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MetricsAgentInfo MetricsAgentInfo +// +// Software version info (so we can warn users about out-of-date software). The software name and the version are "guessed" from the user-agent +// +// swagger:model MetricsAgentInfo +type MetricsAgentInfo struct { + + // last agent push date + LastPush string `json:"last_push,omitempty"` + + // last agent update date + LastUpdate string `json:"last_update,omitempty"` + + // name of the component + Name string `json:"name,omitempty"` + + // software version + Version string `json:"version,omitempty"` +} + +// Validate validates this metrics agent info +func (m *MetricsAgentInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics agent info based on context it is used +func (m *MetricsAgentInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsAgentInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsAgentInfo) UnmarshalBinary(b []byte) error { + var res MetricsAgentInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_bouncer_info.go b/pkg/models/metrics_bouncer_info.go new file mode 100644 index 0000000..f38960e --- /dev/null +++ b/pkg/models/metrics_bouncer_info.go @@ -0,0 +1,61 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MetricsBouncerInfo MetricsBouncerInfo +// +// Software version info (so we can warn users about out-of-date software). 
The software name and the version are "guessed" from the user-agent +// +// swagger:model MetricsBouncerInfo +type MetricsBouncerInfo struct { + + // name of the component + CustomName string `json:"custom_name,omitempty"` + + // last bouncer pull date + LastPull string `json:"last_pull,omitempty"` + + // bouncer type (firewall, php ...) + Name string `json:"name,omitempty"` + + // software version + Version string `json:"version,omitempty"` +} + +// Validate validates this metrics bouncer info +func (m *MetricsBouncerInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics bouncer info based on context it is used +func (m *MetricsBouncerInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsBouncerInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsBouncerInfo) UnmarshalBinary(b []byte) error { + var res MetricsBouncerInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/source.go b/pkg/models/source.go new file mode 100644 index 0000000..8b3a81a --- /dev/null +++ b/pkg/models/source.go @@ -0,0 +1,109 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Source Source +// +// swagger:model Source +type Source struct { + + // provided as a convenience when the source is an IP + AsName string `json:"as_name,omitempty"` + + // provided as a convenience when the source is an IP + AsNumber string `json:"as_number,omitempty"` + + // cn + Cn string `json:"cn,omitempty"` + + // provided as a convenience when the source is an IP + IP string `json:"ip,omitempty"` + + // latitude + Latitude float32 `json:"latitude,omitempty"` + + // longitude + Longitude float32 `json:"longitude,omitempty"` + + // provided as a convenience when the source is an IP + Range string `json:"range,omitempty"` + + // the scope of a source : ip,range,username,etc + // Required: true + Scope *string `json:"scope"` + + // the value of a source : the ip, the range, the username,etc + // Required: true + Value *string `json:"value"` +} + +// Validate validates this source +func (m *Source) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateScope(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Source) validateScope(formats strfmt.Registry) error { + + if err := validate.Required("scope", "body", m.Scope); err != nil { + return err + } + + return nil +} + +func (m *Source) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this source based on context it is used +func (m *Source) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Source) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Source) UnmarshalBinary(b []byte) error { + var res Source + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/topx_response.go b/pkg/models/topx_response.go new file mode 100644 index 0000000..fe23d39 --- /dev/null +++ b/pkg/models/topx_response.go @@ -0,0 +1,110 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// TopxResponse TopxResponse +// +// swagger:model TopxResponse +type TopxResponse struct { + + // We keep the deleted array for the duration of the initial decision. So that when the initial decision is expired, it won't be present in deleted array anymore. + Deleted [][]GetDecisionsResponse `json:"deleted"` + + // new + New [][]GetDecisionsResponse `json:"new"` +} + +// Validate validates this topx response +func (m *TopxResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDeleted(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNew(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TopxResponse) validateDeleted(formats strfmt.Registry) error { + + if swag.IsZero(m.Deleted) { // not required + return nil + } + + for i := 0; i < len(m.Deleted); i++ { + + for ii := 0; ii < len(m.Deleted[i]); ii++ { + + if err := m.Deleted[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("deleted" + "." + strconv.Itoa(i) + "." + strconv.Itoa(ii)) + } + return err + } + + } + + } + + return nil +} + +func (m *TopxResponse) validateNew(formats strfmt.Registry) error { + + if swag.IsZero(m.New) { // not required + return nil + } + + for i := 0; i < len(m.New); i++ { + + for ii := 0; ii < len(m.New[i]); ii++ { + + if err := m.New[i][ii].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("new" + "." + strconv.Itoa(i) + "." 
+ strconv.Itoa(ii)) + } + return err + } + + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TopxResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TopxResponse) UnmarshalBinary(b []byte) error { + var res TopxResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_auth_request.go b/pkg/models/watcher_auth_request.go new file mode 100644 index 0000000..52347c5 --- /dev/null +++ b/pkg/models/watcher_auth_request.go @@ -0,0 +1,96 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// WatcherAuthRequest WatcherAuthRequest +// +// swagger:model WatcherAuthRequest +type WatcherAuthRequest struct { + + // machine id + // Required: true + MachineID *string `json:"machine_id"` + + // password + // Required: true + // Format: password + Password *strfmt.Password `json:"password"` + + // the list of scenarios enabled on the watcher + Scenarios []string `json:"scenarios"` +} + +// Validate validates this watcher auth request +func (m *WatcherAuthRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMachineID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePassword(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *WatcherAuthRequest) validateMachineID(formats strfmt.Registry) error { + + if err := validate.Required("machine_id", "body", m.MachineID); err != nil { + return err + } + + return nil +} + +func (m *WatcherAuthRequest) validatePassword(formats strfmt.Registry) error { + + if err := validate.Required("password", "body", m.Password); err != nil { + return err + } + + if err := validate.FormatOf("password", "body", "password", m.Password.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this watcher auth request based on context it is used +func (m *WatcherAuthRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *WatcherAuthRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WatcherAuthRequest) UnmarshalBinary(b []byte) error { + var res WatcherAuthRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_auth_response.go b/pkg/models/watcher_auth_response.go new file mode 100644 index 0000000..296812d --- /dev/null +++ b/pkg/models/watcher_auth_response.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// WatcherAuthResponse WatcherAuthResponse +// +// the response of a successful authentication +// +// swagger:model WatcherAuthResponse +type WatcherAuthResponse struct { + + // code + Code int64 `json:"code,omitempty"` + + // expire + Expire string `json:"expire,omitempty"` + + // token + Token string `json:"token,omitempty"` +} + +// Validate validates this watcher auth response +func (m *WatcherAuthResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this watcher auth response based on context it is used +func (m *WatcherAuthResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *WatcherAuthResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *WatcherAuthResponse) UnmarshalBinary(b []byte) error { + var res WatcherAuthResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/watcher_registration_request.go b/pkg/models/watcher_registration_request.go new file mode 100644 index 0000000..8be802e --- /dev/null +++ b/pkg/models/watcher_registration_request.go @@ -0,0 +1,93 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// WatcherRegistrationRequest WatcherRegistrationRequest +// +// swagger:model WatcherRegistrationRequest +type WatcherRegistrationRequest struct { + + // machine id + // Required: true + MachineID *string `json:"machine_id"` + + // password + // Required: true + // Format: password + Password *strfmt.Password `json:"password"` +} + +// Validate validates this watcher registration request +func (m *WatcherRegistrationRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMachineID(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePassword(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+    }
+    return nil
+}
+
+func (m *WatcherRegistrationRequest) validateMachineID(formats strfmt.Registry) error {
+
+    if err := validate.Required("machine_id", "body", m.MachineID); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (m *WatcherRegistrationRequest) validatePassword(formats strfmt.Registry) error {
+
+    if err := validate.Required("password", "body", m.Password); err != nil {
+        return err
+    }
+
+    if err := validate.FormatOf("password", "body", "password", m.Password.String(), formats); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// ContextValidate validates this watcher registration request based on context it is used
+func (m *WatcherRegistrationRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+    return nil
+}
+
+// MarshalBinary interface implementation
+func (m *WatcherRegistrationRequest) MarshalBinary() ([]byte, error) {
+    if m == nil {
+        return nil, nil
+    }
+    return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *WatcherRegistrationRequest) UnmarshalBinary(b []byte) error {
+    var res WatcherRegistrationRequest
+    if err := swag.ReadJSON(b, &res); err != nil {
+        return err
+    }
+    *m = res
+    return nil
+}
diff --git a/pkg/parser/README.md b/pkg/parser/README.md
new file mode 100644
index 0000000..46111e3
--- /dev/null
+++ b/pkg/parser/README.md
@@ -0,0 +1,181 @@
+![gopherbadger-tag-do-not-edit]
+
+# Parser
+
+Parser is in charge of turning raw log lines into objects that can be manipulated by heuristics.
+Parsing has several stages, represented by directories in config/stage.
+Alphabetical order dictates the order in which the stages/parsers are processed.
+
+The runtime representation of a line being parsed (or an overflow) is an `Event`, which has fields that can be manipulated by the user:
+ - Parsed: a string dict containing parser outputs
+ - Meta: a string dict containing meta information about the event
+ - Line: a raw line representation
+ - Overflow: a representation of the overflow, if applicable
+
+The Event structure goes through the stages, being altered with each parsing step.
+It's the same object that will later be poured into buckets.
+
+# Parser configuration
+
+A parser configuration is a `Node` object that can contain grok patterns and enrichment instructions.
+
+For example:
+
+```yaml
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+pattern_syntax:
+  MYCAP: ".*"
+nodes:
+  - grok:
+      pattern: ^xxheader %{MYCAP:extracted_value} trailing stuff$
+      apply_on: Line.Raw
+statics:
+  - meta: log_type
+    value: parsed_testlog
+```
+
+### Name
+
+*optional* If present, and if prometheus or profiling are activated, stats will be generated for this node.
+
+### Filter
+
+> `filter: "Line.Src endsWith '/foobar'"`
+
+ - *optional* `filter`: an [expression](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) that will be evaluated against the runtime representation of a line (`Event`)
+ - if the `filter` is present and returns false, the node is not evaluated
+ - if the `filter` is absent, or present and returns true, the node is evaluated
+
+### Debug flag
+
+> `debug: true`
+
+ - *optional* `debug`: a bool that enables debug output for the node (applies at runtime and during configuration parsing)
+
+### OnSuccess flag
+
+> `onsuccess: next_stage|continue`
+
+ - *mandatory* Indicates the behaviour to follow if the node succeeds: `next_stage` makes the line go to the next stage, while `continue` keeps processing the current stage (see the example below).
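+
+For instance, a minimal node (the names here are hypothetical) that tags a line and hands it to the next stage could look like:
+
+```yaml
+filter: "evt.Line.Labels.type == 'syslog'"
+onsuccess: next_stage
+name: example/syslog-tag
+statics:
+  - meta: service
+    value: syslog
+```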
+
+### Statics
+
+```yaml
+statics:
+  - meta: service
+    value: tcp
+  - meta: source_ip
+    expression: "Event['source_ip']"
+  - parsed: "new_connection"
+    expression: "Event['tcpflags'] contains 'S' ? 'true' : 'false'"
+  - target: Parsed.this_is_a_test
+    value: foobar
+```
+
+Statics apply when a node is considered successful, and are used to alter the `Event` structure.
+An empty node, a node whose grok pattern succeeded, or a node whose enrichment directive worked are all successful nodes.
+Statics can:
+ - meta: add/alter an entry in the `Meta` dict
+ - parsed: add/alter an entry in the `Parsed` dict
+ - target: indicate a destination field by name, such as Meta.my_key
+The source of the data can be:
+ - value: a static value
+ - expression: the result of an expression
+
+
+### Grok patterns
+
+Grok patterns are used to parse one field of `Event` into one or several others:
+
+```yaml
+grok:
+  name: "TCPDUMP_OUTPUT"
+  apply_on: message
+```
+
+`name` is the name of a pattern loaded from `patterns/`.
+Base patterns can be seen in the repo: https://github.com/crowdsecurity/grokky/blob/master/base.go
+
+
+---
+
+
+```yaml
+grok:
+  pattern: "^%{GREEDYDATA:request}\\?%{GREEDYDATA:http_args}$"
+  apply_on: request
+```
+`pattern` is a valid grok pattern; the optional `apply_on` indicates which field it should be applied to.
+
+
+### Patterns syntax
+
+Declared at the node level, `pattern_syntax` is a list of sub-grok patterns to be declared.
+
+```yaml
+pattern_syntax:
+  DIR: "^.*/"
+  FILE: "[^/].*$"
+```
+
+
+### Enrichment
+
+The enrichment mechanism is exposed via statics:
+
+```yaml
+statics:
+  - method: GeoIpCity
+    expression: Meta.source_ip
+  - meta: IsoCode
+    expression: Enriched.IsoCode
+  - meta: IsInEU
+    expression: Enriched.IsInEU
+```
+
+The `GeoIpCity` method is called with the value of `Meta.source_ip`.
+Enrichment plugins can output one or more key:value pairs in the `Enriched` map,
+and it's up to the user to copy the relevant values to `Meta` or similar.
+
+# Trees
+
+The `Node` object also allows a `nodes` entry, which is a list of `Node` entries, allowing you to build trees.
+
+```yaml
+filter: "Event['program'] == 'nginx'" #A
+nodes: #A'
+  - grok: #B
+      name: "NGINXACCESS"
+      # these statics apply only if the above grok pattern matched
+      statics: #B'
+        - meta: log_type
+          value: "http_access-log"
+  - grok: #C
+      name: "NGINXERROR"
+      statics:
+        - meta: log_type
+          value: "http_error-log"
+statics: #D
+  - meta: service
+    value: http
+```
+
+The evaluation process of a node is as follows:
+ - apply the `filter` (A); if it doesn't match, exit
+ - iterate over the list of nodes (A') and apply the node process to each.
+ - if a `grok` entry is present, process it + - if the `grok` entry returned data, apply the local statics of the node (if the grok 'B' was successful, apply B' statics) + - if any of the `nodes` or the `grok` was successful, apply the statics (D) + +# Code Organisation + +Main structs : + - Node (config.go) : the runtime representation of parser configuration + - Event (runtime.go) : the runtime representation of the line being parsed + +Main funcs : + - CompileNode : turns YAML into runtime-ready tree (Node) + - ProcessNode : process the raw line against the parser tree, and produces ready-for-buckets data + diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go new file mode 100644 index 0000000..2cc56af --- /dev/null +++ b/pkg/parser/enrich.go @@ -0,0 +1,71 @@ +package parser + +import ( + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +/* should be part of a packaged shared with enrich/geoip.go */ +type EnrichFunc func(string, *types.Event, interface{}, *log.Entry) (map[string]string, error) +type InitFunc func(map[string]string) (interface{}, error) + +type EnricherCtx struct { + Registered map[string]*Enricher +} + +type Enricher struct { + Name string + InitFunc InitFunc + EnrichFunc EnrichFunc + Ctx interface{} +} + +/* mimic plugin loading */ +func Loadplugin(path string) (EnricherCtx, error) { + enricherCtx := EnricherCtx{} + enricherCtx.Registered = make(map[string]*Enricher) + + enricherConfig := map[string]string{"datadir": path} + + EnrichersList := []*Enricher{ + { + Name: "GeoIpCity", + InitFunc: GeoIPCityInit, + EnrichFunc: GeoIpCity, + }, + { + Name: "GeoIpASN", + InitFunc: GeoIPASNInit, + EnrichFunc: GeoIpASN, + }, + { + Name: "IpToRange", + InitFunc: IpToRangeInit, + EnrichFunc: IpToRange, + }, + { + Name: "reverse_dns", + InitFunc: reverseDNSInit, + EnrichFunc: reverse_dns, + }, + { + Name: "ParseDate", + InitFunc: parseDateInit, + EnrichFunc: ParseDate, + }, + } + + for _, enricher := range EnrichersList { + log.Debugf("Initiating enricher '%s'", enricher.Name) + pluginCtx, err := enricher.InitFunc(enricherConfig) + if err != nil { + log.Errorf("unable to register plugin '%s': %v", enricher.Name, err) + continue + } + enricher.Ctx = pluginCtx + log.Infof("Successfully registered enricher '%s'", enricher.Name) + enricherCtx.Registered[enricher.Name] = enricher + } + + return enricherCtx, nil +} diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go new file mode 100644 index 0000000..948e176 --- /dev/null +++ b/pkg/parser/enrich_date.go @@ -0,0 +1,91 @@ +package parser + +import ( + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +func parseDateWithFormat(date, format string) (string, time.Time) { + t, err := time.Parse(format, date) + if err == nil && !t.IsZero() { + //if the year isn't set, set it to current date :) + if t.Year() == 0 { + t = t.AddDate(time.Now().UTC().Year(), 0, 0) + } + retstr, err := t.MarshalText() + if err != nil { + log.Warningf("Failed marshaling '%v'", t) + return "", time.Time{} + } + return string(retstr), t + } + return "", time.Time{} +} + +func GenDateParse(date string) (string, time.Time) { + var ( + layouts = [...]string{ + time.RFC3339, + "02/Jan/2006:15:04:05 -0700", + "Mon Jan 2 15:04:05 2006", + "02-Jan-2006 15:04:05 europe/paris", + "01/02/2006 15:04:05", + "2006-01-02 15:04:05.999999999 -0700 MST", + "Jan 2 15:04:05", + "Mon Jan 02 15:04:05.000000 2006", + "2006-01-02T15:04:05Z07:00", + "2006/01/02", + "2006/01/02 15:04", + 
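+		// remaining fallbacks: date-only layouts first, then minute- and second-precision variants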
"2006-01-02", + "2006-01-02 15:04", + "2006/01/02 15:04:05", + "2006-01-02 15:04:05", + } + ) + + for _, dateFormat := range layouts { + retstr, parsedDate := parseDateWithFormat(date, dateFormat) + if !parsedDate.IsZero() { + return retstr, parsedDate + } + } + return "", time.Time{} +} + +func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[string]string, error) { + + var ret map[string]string = make(map[string]string) + var strDate string + var parsedDate time.Time + + if p.StrTimeFormat != "" { + strDate, parsedDate = parseDateWithFormat(in, p.StrTimeFormat) + if !parsedDate.IsZero() { + ret["MarshaledTime"] = strDate + return ret, nil + } else { + plog.Debugf("unable to parse '%s' with layout '%s'", in, p.StrTimeFormat) + } + } + strDate, parsedDate = GenDateParse(in) + if !parsedDate.IsZero() { + ret["MarshaledTime"] = strDate + return ret, nil + } + plog.Debugf("no suitable date format found for '%s', falling back to now", in) + now := time.Now().UTC() + retstr, err := now.MarshalText() + if err != nil { + plog.Warning("Failed marshaling current time") + return ret, err + } + ret["MarshaledTime"] = string(retstr) + + return ret, nil +} + +func parseDateInit(cfg map[string]string) (interface{}, error) { + return nil, nil +} diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go new file mode 100644 index 0000000..5a3e08f --- /dev/null +++ b/pkg/parser/enrich_date_test.go @@ -0,0 +1,66 @@ +package parser + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" +) + +func TestDateParse(t *testing.T) { + tests := []struct { + name string + evt types.Event + expected_err *error + expected_strTime *string + }{ + { + name: "RFC3339", + evt: types.Event{ + StrTime: "2019-10-12T07:20:50.52Z", + }, + expected_err: nil, + expected_strTime: types.StrPtr("2019-10-12T07:20:50.52Z"), + }, + { + name: "02/Jan/2006:15:04:05 -0700", + evt: types.Event{ + StrTime: "02/Jan/2006:15:04:05 -0700", + }, + expected_err: nil, + expected_strTime: types.StrPtr("2006-01-02T15:04:05-07:00"), + }, + { + name: "Dec 17 08:17:43", + evt: types.Event{ + StrTime: "2011 X 17 zz 08X17X43 oneone Dec", + StrTimeFormat: "2006 X 2 zz 15X04X05 oneone Jan", + }, + expected_err: nil, + expected_strTime: types.StrPtr("2011-12-17T08:17:43Z"), + }, + } + + logger := log.WithFields(log.Fields{ + "test": "test", + }) + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, nil, logger) + if tt.expected_err != nil { + if err != *tt.expected_err { + t.Errorf("%s: expected error %v, got %v", tt.name, tt.expected_err, err) + } + } else if err != nil { + t.Errorf("%s: expected no error, got %v", tt.name, err) + } + if err != nil { + return + } + if tt.expected_strTime != nil && strTime["MarshaledTime"] != *tt.expected_strTime { + t.Errorf("expected strTime %s, got %s", *tt.expected_strTime, strTime["MarshaledTime"]) + } + }) + } +} diff --git a/pkg/parser/enrich_dns.go b/pkg/parser/enrich_dns.go new file mode 100644 index 0000000..5bbc0e9 --- /dev/null +++ b/pkg/parser/enrich_dns.go @@ -0,0 +1,31 @@ +package parser + +import ( + "net" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + //"github.com/crowdsecurity/crowdsec/pkg/parser" +) + +/* All plugins must export a list of function pointers for exported symbols */ +//var ExportedFuncs = []string{"reverse_dns"} + +func reverse_dns(field string, p *types.Event, ctx interface{}, 
plog *log.Entry) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + rets, err := net.LookupAddr(field) + if err != nil { + plog.Debugf("failed to resolve '%s'", field) + return nil, nil //nolint:nilerr + } + //When using the host C library resolver, at most one result will be returned. To bypass the host resolver, use a custom Resolver. + ret["reverse_dns"] = rets[0] + return ret, nil +} + +func reverseDNSInit(cfg map[string]string) (interface{}, error) { + return nil, nil +} diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go new file mode 100644 index 0000000..8c25bef --- /dev/null +++ b/pkg/parser/enrich_geoip.go @@ -0,0 +1,130 @@ +package parser + +import ( + "fmt" + "net" + "strconv" + + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + + "github.com/oschwald/geoip2-golang" + "github.com/oschwald/maxminddb-golang" +) + +func IpToRange(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { + var dummy interface{} + ret := make(map[string]string) + + if field == "" { + return nil, nil + } + ip := net.ParseIP(field) + if ip == nil { + plog.Infof("Can't parse ip %s, no range enrich", field) + return nil, nil + } + net, ok, err := ctx.(*maxminddb.Reader).LookupNetwork(ip, &dummy) + if err != nil { + plog.Errorf("Failed to fetch network for %s : %v", ip.String(), err) + return nil, nil + } + if !ok { + plog.Debugf("Unable to find range of %s", ip.String()) + return nil, nil + } + ret["SourceRange"] = net.String() + return ret, nil +} + +func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + + ip := net.ParseIP(field) + if ip == nil { + plog.Infof("Can't parse ip %s, no ASN enrich", ip) + return nil, nil + } + record, err := ctx.(*geoip2.Reader).ASN(ip) + if err != nil { + plog.Errorf("Unable to enrich ip '%s'", field) + return nil, nil //nolint:nilerr + } + ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) + ret["ASNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) + ret["ASNOrg"] = record.AutonomousSystemOrganization + + plog.Tracef("geoip ASN %s -> %s, %s", field, ret["ASNNumber"], ret["ASNOrg"]) + + return ret, nil +} + +func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { + ret := make(map[string]string) + if field == "" { + return nil, nil + } + ip := net.ParseIP(field) + if ip == nil { + plog.Infof("Can't parse ip %s, no City enrich", ip) + return nil, nil + } + record, err := ctx.(*geoip2.Reader).City(ip) + if err != nil { + plog.Debugf("Unable to enrich ip '%s'", ip) + return nil, nil //nolint:nilerr + } + if record.Country.IsoCode != "" { + ret["IsoCode"] = record.Country.IsoCode + ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion) + } else if record.RegisteredCountry.IsoCode != "" { + ret["IsoCode"] = record.RegisteredCountry.IsoCode + ret["IsInEU"] = strconv.FormatBool(record.RegisteredCountry.IsInEuropeanUnion) + } else if record.RepresentedCountry.IsoCode != "" { + ret["IsoCode"] = record.RepresentedCountry.IsoCode + ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion) + } else { + ret["IsoCode"] = "" + ret["IsInEU"] = strconv.FormatBool(false) + } + + ret["Latitude"] = fmt.Sprintf("%f", record.Location.Latitude) + ret["Longitude"] = fmt.Sprintf("%f", record.Location.Longitude) + + 
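+	// coordinates are rendered with %f, i.e. fixed six-decimal precision strings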
plog.Tracef("geoip City %s -> %s, %s", field, ret["IsoCode"], ret["IsInEU"]) + + return ret, nil +} + +func GeoIPCityInit(cfg map[string]string) (interface{}, error) { + dbCityReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-City.mmdb") + if err != nil { + log.Debugf("couldn't open geoip : %v", err) + return nil, err + } + + return dbCityReader, nil +} + +func GeoIPASNInit(cfg map[string]string) (interface{}, error) { + dbASReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") + if err != nil { + log.Debugf("couldn't open geoip : %v", err) + return nil, err + } + + return dbASReader, nil +} + +func IpToRangeInit(cfg map[string]string) (interface{}, error) { + ipToRangeReader, err := maxminddb.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") + if err != nil { + log.Debugf("couldn't open geoip : %v", err) + return nil, err + } + + return ipToRangeReader, nil +} diff --git a/pkg/parser/node.go b/pkg/parser/node.go new file mode 100644 index 0000000..38af6fb --- /dev/null +++ b/pkg/parser/node.go @@ -0,0 +1,557 @@ +package parser + +import ( + "fmt" + "net" + "strings" + + "github.com/antonmedv/expr" + "github.com/crowdsecurity/grokky" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" + + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" +) + +type Node struct { + FormatVersion string `yaml:"format"` + //Enable config + runtime debug of node via config o/ + Debug bool `yaml:"debug,omitempty"` + //If enabled, the node (and its child) will report their own statistics + Profiling bool `yaml:"profiling,omitempty"` + //Name, author, description and reference(s) for parser pattern + Name string `yaml:"name,omitempty"` + Author string `yaml:"author,omitempty"` + Description string `yaml:"description,omitempty"` + References []string `yaml:"references,omitempty"` + //if debug is present in the node, keep its specific Logger in runtime structure + Logger *log.Entry `yaml:"-"` + //This is mostly a hack to make writing less repetitive. 
+ //relying on stage, we know which field to parse, and we + //can also promote log to next stage on success + Stage string `yaml:"stage,omitempty"` + //OnSuccess allows to tag a node to be able to move log to next stage on success + OnSuccess string `yaml:"onsuccess,omitempty"` + rn string //this is only for us in debug, a random generated name for each node + //Filter is executed at runtime (with current log line as context) + //and must succeed or node is exited + Filter string `yaml:"filter,omitempty"` + RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter + ExprDebugger *exprhelpers.ExprDebugger `yaml:"-" json:"-"` //used to debug expression by printing the content of each variable of the expression + //If node has leafs, execute all of them until one asks for a 'break' + LeavesNodes []Node `yaml:"nodes,omitempty"` + //Flag used to describe when to 'break' or return an 'error' + EnrichFunctions EnricherCtx + + /* If the node is actually a leaf, it can have : grok, enrich, statics */ + //pattern_syntax are named grok patterns that are re-utilized over several grok patterns + SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"` + + //Holds a grok pattern + Grok types.GrokPattern `yaml:"grok,omitempty"` + //Statics can be present in any type of node and is executed last + Statics []types.ExtraField `yaml:"statics,omitempty"` + //Whitelists + Whitelist Whitelist `yaml:"whitelist,omitempty"` + Data []*types.DataSource `yaml:"data,omitempty"` +} + +func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { + + //stage is being set automagically + if n.Stage == "" { + return fmt.Errorf("stage needs to be an existing stage") + } + + /* "" behaves like continue */ + if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" { + return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess) + } + if n.Filter != "" && n.RunTimeFilter == nil { + return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter) + } + + if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" { + if n.Grok.TargetField == "" && n.Grok.ExpValue == "" { + return fmt.Errorf("grok requires 'expression' or 'apply_on'") + } + if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" { + return fmt.Errorf("grok needs 'pattern' or 'name'") + } + } + + for idx, static := range n.Statics { + if static.Method != "" { + if static.ExpValue == "" { + return fmt.Errorf("static %d : when method is set, expression must be present", idx) + } + if _, ok := ectx.Registered[static.Method]; !ok { + log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method) + } + } else { + if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" { + return fmt.Errorf("static %d : at least one of meta/event/target must be set", idx) + } + if static.Value == "" && static.RunTimeValue == nil { + return fmt.Errorf("static %d value or expression must be set", idx) + } + } + } + return nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { + var NodeState bool + var NodeHasOKGrok bool + clog := n.Logger + + cachedExprEnv := expressionEnv + + clog.Tracef("Event entering node") + if n.RunTimeFilter != nil { + //Evaluate node's filter + output, err := expr.Run(n.RunTimeFilter, cachedExprEnv) + if err != nil { + clog.Warningf("failed to run filter : %v", err) + clog.Debugf("Event leaving node : ko") + return false, nil + } + + switch out := output.(type) { + case bool: + 
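+			// the filter returned a proper boolean: surface it through the expression debugger (when debug is on) before acting on it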
if n.Debug { + n.ExprDebugger.Run(clog, out, cachedExprEnv) + } + if !out { + clog.Debugf("Event leaving node : ko (failed filter)") + return false, nil + } + default: + clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) + clog.Debugf("Event leaving node : ko") + return false, nil + } + NodeState = true + } else { + clog.Tracef("Node has not filter, enter") + NodeState = true + } + + if n.Name != "" { + NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + } + isWhitelisted := false + hasWhitelist := false + var srcs []net.IP + /*overflow and log don't hold the source ip in the same field, should be changed */ + /* perform whitelist checks for ips, cidr accordingly */ + /* TODO move whitelist elsewhere */ + if p.Type == types.LOG { + if _, ok := p.Meta["source_ip"]; ok { + srcs = append(srcs, net.ParseIP(p.Meta["source_ip"])) + } + } else if p.Type == types.OVFLW { + for k := range p.Overflow.Sources { + srcs = append(srcs, net.ParseIP(k)) + } + } + for _, src := range srcs { + if isWhitelisted { + break + } + for _, v := range n.Whitelist.B_Ips { + if v.Equal(src) { + clog.Debugf("Event from [%s] is whitelisted by IP (%s), reason [%s]", src, v, n.Whitelist.Reason) + isWhitelisted = true + } else { + clog.Tracef("whitelist: %s is not eq [%s]", src, v) + } + hasWhitelist = true + } + for _, v := range n.Whitelist.B_Cidrs { + if v.Contains(src) { + clog.Debugf("Event from [%s] is whitelisted by CIDR (%s), reason [%s]", src, v, n.Whitelist.Reason) + isWhitelisted = true + } else { + clog.Tracef("whitelist: %s not in [%s]", src, v) + } + hasWhitelist = true + } + } + + if isWhitelisted { + p.Whitelisted = true + } + /* run whitelist expression tests anyway */ + for eidx, e := range n.Whitelist.B_Exprs { + output, err := expr.Run(e.Filter, cachedExprEnv) + if err != nil { + clog.Warningf("failed to run whitelist expr : %v", err) + clog.Debug("Event leaving node : ko") + return false, nil + } + switch out := output.(type) { + case bool: + if n.Debug { + e.ExprDebugger.Run(clog, out, cachedExprEnv) + } + if out { + clog.Debugf("Event is whitelisted by expr, reason [%s]", n.Whitelist.Reason) + p.Whitelisted = true + isWhitelisted = true + } + hasWhitelist = true + default: + log.Errorf("unexpected type %t (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx]) + } + } + if isWhitelisted { + p.WhitelistReason = n.Whitelist.Reason + /*huglily wipe the ban order if the event is whitelisted and it's an overflow */ + if p.Type == types.OVFLW { /*don't do this at home kids */ + ips := []string{} + for _, src := range srcs { + ips = append(ips, src.String()) + } + clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) + p.Overflow.Whitelisted = true + } + } + + //Process grok if present, should be exclusive with nodes :) + gstr := "" + if n.Grok.RunTimeRegexp != nil { + clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) + //for unparsed, parsed etc. 
set sensible defaults to reduce user hassle + if n.Grok.TargetField != "" { + //it's a hack to avoid using real reflect + if n.Grok.TargetField == "Line.Raw" { + gstr = p.Line.Raw + } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { + gstr = val + } else { + clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + NodeState = false + } + } else if n.Grok.RunTimeValue != nil { + output, err := expr.Run(n.Grok.RunTimeValue, cachedExprEnv) + if err != nil { + clog.Warningf("failed to run RunTimeValue : %v", err) + NodeState = false + } + switch out := output.(type) { + case string: + gstr = out + default: + clog.Errorf("unexpected return type for RunTimeValue : %T", output) + } + } + + var groklabel string + if n.Grok.RegexpName == "" { + groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue) + } else { + groklabel = n.Grok.RegexpName + } + grok := n.Grok.RunTimeRegexp.Parse(gstr) + if len(grok) > 0 { + /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ + NodeHasOKGrok = true + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) + //We managed to grok stuff, merged into parse + for k, v := range grok { + clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) + p.Parsed[k] = v + } + // if the grok succeed, process associated statics + err := n.ProcessStatics(n.Grok.Statics, p) + if err != nil { + clog.Errorf("(%s) Failed to process statics : %v", n.rn, err) + return false, err + } + } else { + //grok failed, node failed + clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) + NodeState = false + } + + } else { + clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp) + } + + //Iterate on leafs + if len(n.LeavesNodes) > 0 { + for _, leaf := range n.LeavesNodes { + ret, err := leaf.process(p, ctx, cachedExprEnv) + if err != nil { + clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Debugf("Event leaving node : ko") + return false, err + } + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + if ret { + NodeState = true + /* if child is successful, stop processing */ + if n.OnSuccess == "next_stage" { + clog.Debugf("child is success, OnSuccess=next_stage, skip") + break + } + } else if !NodeHasOKGrok { + /* + If the parent node has a successful grok pattern, it's state will stay successful even if one or more chil fails. + If the parent node is a skeleton node (no grok pattern), then at least one child must be successful for it to be a success. + */ + NodeState = false + } + } + } + /*todo : check if a node made the state change ?*/ + /* should the childs inherit the on_success behavior */ + + clog.Tracef("State after nodes : %v", NodeState) + + //grok or leafs failed, don't process statics + if !NodeState { + if n.Name != "" { + NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + } + clog.Debugf("Event leaving node : ko") + return NodeState, nil + } + + if n.Name != "" { + NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + } + /* + Please kill me. this is to apply statics when the node *has* whitelists that successfully matched the node. 
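+	   In short: statics run when the node has statics to apply and either no whitelist is declared, or the whitelist matched.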
+ */ + if hasWhitelist && isWhitelisted && len(n.Statics) > 0 || len(n.Statics) > 0 && !hasWhitelist { + clog.Debugf("+ Processing %d statics", len(n.Statics)) + // if all else is good in whitelist, process node's statics + err := n.ProcessStatics(n.Statics, p) + if err != nil { + clog.Errorf("Failed to process statics : %v", err) + return false, err + } + } else { + clog.Tracef("! No node statics") + } + + if NodeState { + clog.Debugf("Event leaving node : ok") + log.Tracef("node is successful, check strategy") + if n.OnSuccess == "next_stage" { + idx := stageidx(p.Stage, ctx.Stages) + //we're at the last stage + if idx+1 == len(ctx.Stages) { + clog.Debugf("node reached the last stage : %s", p.Stage) + } else { + clog.Debugf("move Event from stage %s to %s", p.Stage, ctx.Stages[idx+1]) + p.Stage = ctx.Stages[idx+1] + } + } else { + clog.Tracef("no strategy on success (%s), continue !", n.OnSuccess) + } + } else { + clog.Debugf("Event leaving node : ko") + } + clog.Tracef("Node successful, continue") + return NodeState, nil +} + +func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { + var err error + var valid bool + + valid = false + + dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true} + n.rn = seed.Generate() + + n.EnrichFunctions = ectx + log.Tracef("compile, node is %s", n.Stage) + /* if the node has debugging enabled, create a specific logger with debug + that will be used only for processing this node ;) */ + if n.Debug { + var clog = logrus.New() + if err := types.ConfigureLogger(clog); err != nil { + log.Fatalf("While creating bucket-specific logger : %s", err) + } + clog.SetLevel(log.DebugLevel) + n.Logger = clog.WithFields(log.Fields{ + "id": n.rn, + }) + n.Logger.Infof("%s has debug enabled", n.Name) + } else { + /* else bind it to the default one (might find something more elegant here)*/ + n.Logger = log.WithFields(log.Fields{ + "id": n.rn, + }) + } + + /* display info about top-level nodes, they should be the only one with explicit stage name ?*/ + n.Logger = n.Logger.WithFields(log.Fields{"stage": n.Stage, "name": n.Name}) + + n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n)) + + //compile filter if present + if n.Filter != "" { + n.RunTimeFilter, err = expr.Compile(n.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return fmt.Errorf("compilation of '%s' failed: %v", n.Filter, err) + } + + if n.Debug { + n.ExprDebugger, err = exprhelpers.NewDebugger(n.Filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + log.Errorf("unable to build debug filter for '%s' : %s", n.Filter, err) + } + } + + } + + /* handle pattern_syntax and groks */ + for _, pattern := range n.SubGroks { + n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value) + if err := pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil { + if err == grokky.ErrAlreadyExist { + n.Logger.Warningf("grok '%s' already registred", pattern.Key) + continue + } + n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err) + return err + } + } + + /* load grok by name or compile in-place */ + if n.Grok.RegexpName != "" { + n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName) + n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) + if err != nil { + return fmt.Errorf("Unable to find grok '%s' : %v", n.Grok.RegexpName, err) + } + if n.Grok.RunTimeRegexp == nil { + return fmt.Errorf("Empty grok '%s'", 
n.Grok.RegexpName) + } + n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.Regexp.String()) + valid = true + } else if n.Grok.RegexpValue != "" { + if strings.HasSuffix(n.Grok.RegexpValue, "\n") { + n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue) + } + n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue) + if err != nil { + return fmt.Errorf("Failed to compile grok '%s': %v\n", n.Grok.RegexpValue, err) + } + if n.Grok.RunTimeRegexp == nil { + // We shouldn't be here because compilation succeeded, so regexp shouldn't be nil + return fmt.Errorf("Grok compilation failure: %s", n.Grok.RegexpValue) + } + n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.Regexp.String()) + valid = true + } + + /*if grok source is an expression*/ + if n.Grok.ExpValue != "" { + n.Grok.RunTimeValue, err = expr.Compile(n.Grok.ExpValue, + expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return errors.Wrap(err, "while compiling grok's expression") + } + } + + /* load grok statics */ + if len(n.Grok.Statics) > 0 { + //compile expr statics if present + for idx := range n.Grok.Statics { + if n.Grok.Statics[idx].ExpValue != "" { + n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue, + expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + return err + } + } + } + valid = true + } + /* compile leafs if present */ + if len(n.LeavesNodes) > 0 { + for idx := range n.LeavesNodes { + if n.LeavesNodes[idx].Name == "" { + n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name) + } + /*propagate debug/stats to child nodes*/ + if !n.LeavesNodes[idx].Debug && n.Debug { + n.LeavesNodes[idx].Debug = true + } + if !n.LeavesNodes[idx].Profiling && n.Profiling { + n.LeavesNodes[idx].Profiling = true + } + n.LeavesNodes[idx].Stage = n.Stage + err = n.LeavesNodes[idx].compile(pctx, ectx) + if err != nil { + return err + } + } + valid = true + } + /* load statics if present */ + for idx := range n.Statics { + if n.Statics[idx].ExpValue != "" { + n.Statics[idx].RunTimeValue, err = expr.Compile(n.Statics[idx].ExpValue, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + n.Logger.Errorf("Statics Compilation failed %v.", err) + return err + } + } + valid = true + } + + /* compile whitelists if present */ + for _, v := range n.Whitelist.Ips { + n.Whitelist.B_Ips = append(n.Whitelist.B_Ips, net.ParseIP(v)) + n.Logger.Debugf("adding ip %s to whitelists", net.ParseIP(v)) + valid = true + } + for _, v := range n.Whitelist.Cidrs { + _, tnet, err := net.ParseCIDR(v) + if err != nil { + n.Logger.Fatalf("Unable to parse cidr whitelist '%s' : %v.", v, err) + } + n.Whitelist.B_Cidrs = append(n.Whitelist.B_Cidrs, tnet) + n.Logger.Debugf("adding cidr %s to whitelists", tnet) + valid = true + } + for _, filter := range n.Whitelist.Exprs { + expression := &ExprWhitelist{} + expression.Filter, err = expr.Compile(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + n.Logger.Fatalf("Unable to compile whitelist expression '%s' : %v.", filter, err) + } + expression.ExprDebugger, err = exprhelpers.NewDebugger(filter, expr.Env(exprhelpers.GetExprEnv(map[string]interface{}{"evt": &types.Event{}}))) + if err != nil { + log.Errorf("unable to build debug filter for '%s' : %s", filter, err) + } + n.Whitelist.B_Exprs = append(n.Whitelist.B_Exprs, 
expression) + n.Logger.Debugf("adding expression %s to whitelists", filter) + valid = true + } + + if !valid { + /* node is empty, error force return */ + n.Logger.Error("Node is empty or invalid, abort") + n.Stage = "" + return fmt.Errorf("Node is empty") + } + + if err := n.validate(pctx, ectx); err != nil { + return err + } + return nil +} diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go new file mode 100644 index 0000000..f529cb4 --- /dev/null +++ b/pkg/parser/node_test.go @@ -0,0 +1,69 @@ +package parser + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/types" + yaml "gopkg.in/yaml.v2" +) + +func TestParserConfigs(t *testing.T) { + pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + if err != nil { + t.Fatalf("unable to load patterns : %s", err) + } + + /*the actual tests*/ + var CfgTests = []struct { + NodeCfg *Node + Compiles bool + Valid bool + }{ + //valid node with grok pattern + {&Node{Debug: true, Stage: "s00", Grok: types.GrokPattern{RegexpValue: "^x%{DATA:extr}$", TargetField: "t"}}, true, true}, + //bad filter + {&Node{Debug: true, Stage: "s00", Filter: "ratata"}, false, false}, + //empty node + {&Node{Debug: true, Stage: "s00", Filter: "true"}, false, false}, + //bad subgrok + {&Node{Debug: true, Stage: "s00", SubGroks: yaml.MapSlice{{Key: string("FOOBAR"), Value: string("[a-$")}}}, false, true}, + //valid node with grok pattern + {&Node{Debug: true, Stage: "s00", SubGroks: yaml.MapSlice{{Key: string("FOOBAR"), Value: string("[a-z]")}}, Grok: types.GrokPattern{RegexpValue: "^x%{FOOBAR:extr}$", TargetField: "t"}}, true, true}, + //bad node success + {&Node{Debug: true, Stage: "s00", OnSuccess: "ratat", Grok: types.GrokPattern{RegexpValue: "^x%{DATA:extr}$", TargetField: "t"}}, false, false}, + //ok node success + {&Node{Debug: true, Stage: "s00", OnSuccess: "continue", Grok: types.GrokPattern{RegexpValue: "^x%{DATA:extr}$", TargetField: "t"}}, true, true}, + //valid node with grok sub-pattern used by name + {&Node{Debug: true, Stage: "s00", SubGroks: yaml.MapSlice{{Key: string("FOOBARx"), Value: string("[a-z] %{DATA:lol}$")}}, Grok: types.GrokPattern{RegexpName: "FOOBARx", TargetField: "t"}}, true, true}, + //node with unexisting grok pattern + {&Node{Debug: true, Stage: "s00", Grok: types.GrokPattern{RegexpName: "RATATA", TargetField: "t"}}, false, true}, + //node with grok pattern dependencies + {&Node{Debug: true, Stage: "s00", SubGroks: yaml.MapSlice{ + {Key: string("SUBGROK"), Value: string("[a-z]")}, + {Key: string("MYGROK"), Value: string("[a-z]%{SUBGROK}")}, + }, Grok: types.GrokPattern{RegexpValue: "^x%{MYGROK:extr}$", TargetField: "t"}}, true, true}, + //node with broken grok pattern dependencies + {&Node{Debug: true, Stage: "s00", SubGroks: yaml.MapSlice{ + {Key: string("SUBGROKBIS"), Value: string("[a-z]%{MYGROKBIS}")}, + {Key: string("MYGROKBIS"), Value: string("[a-z]")}, + }, Grok: types.GrokPattern{RegexpValue: "^x%{MYGROKBIS:extr}$", TargetField: "t"}}, false, true}, + } + for idx := range CfgTests { + err := CfgTests[idx].NodeCfg.compile(pctx, EnricherCtx{}) + if CfgTests[idx].Compiles == true && err != nil { + t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) + } + if CfgTests[idx].Compiles == false && err == nil { + t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) + } + + err = CfgTests[idx].NodeCfg.validate(pctx, EnricherCtx{}) + if CfgTests[idx].Valid == true && err != nil { + t.Fatalf("Valid: (%d/%d) expected valid, got : %s", 
idx+1, len(CfgTests), err) + } + if CfgTests[idx].Valid == false && err == nil { + t.Fatalf("Valid: (%d/%d) expected error", idx+1, len(CfgTests)) + } + + } +} diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go new file mode 100644 index 0000000..bebee33 --- /dev/null +++ b/pkg/parser/parsing_test.go @@ -0,0 +1,416 @@ +package parser + +import ( + "bytes" + "fmt" + "html/template" + "io" + "os" + "sort" + "strings" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type TestFile struct { + Lines []types.Event `yaml:"lines,omitempty"` + Results []types.Event `yaml:"results,omitempty"` +} + +var debug bool = false + +func TestParser(t *testing.T) { + debug = true + log.SetLevel(log.InfoLevel) + var envSetting = os.Getenv("TEST_ONLY") + pctx, ectx, err := prepTests() + if err != nil { + t.Fatalf("failed to load env : %s", err) + } + //Init the enricher + if envSetting != "" { + if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { + t.Fatalf("Test '%s' failed : %s", envSetting, err) + } + } else { + fds, err := os.ReadDir("./tests/") + if err != nil { + t.Fatalf("Unable to read test directory : %s", err) + } + for _, fd := range fds { + if !fd.IsDir() { + continue + } + fname := "./tests/" + fd.Name() + log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, nil); err != nil { + t.Fatalf("Test '%s' failed : %s", fname, err) + } + } + } + +} + +func BenchmarkParser(t *testing.B) { + log.Printf("start bench !!!!") + debug = false + log.SetLevel(log.ErrorLevel) + pctx, ectx, err := prepTests() + if err != nil { + t.Fatalf("failed to load env : %s", err) + } + var envSetting = os.Getenv("TEST_ONLY") + + if envSetting != "" { + if err := testOneParser(pctx, ectx, envSetting, t); err != nil { + t.Fatalf("Test '%s' failed : %s", envSetting, err) + } + } else { + fds, err := os.ReadDir("./tests/") + if err != nil { + t.Fatalf("Unable to read test directory : %s", err) + } + for _, fd := range fds { + if !fd.IsDir() { + continue + } + fname := "./tests/" + fd.Name() + log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, t); err != nil { + t.Fatalf("Test '%s' failed : %s", fname, err) + } + } + } +} + +func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { + + var ( + err error + pnodes []Node + + parser_configs []Stagefile + ) + log.Warningf("testing %s", dir) + parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) + cfg, err := os.ReadFile(parser_cfg_file) + if err != nil { + return fmt.Errorf("failed opening %s : %s", parser_cfg_file, err) + } + tmpl, err := template.New("test").Parse(string(cfg)) + if err != nil { + return fmt.Errorf("failed to parse template %s : %s", cfg, err) + } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) + if err != nil { + panic(err) + } + if err := yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { + return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err) + } + + pnodes, err = LoadStages(parser_configs, pctx, ectx) + if err != nil { + return fmt.Errorf("unable to load parser config : %s", err) + } + + //TBD: Load post overflows + //func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { + parser_test_file := fmt.Sprintf("%s/test.yaml", dir) + tests := loadTestFile(parser_test_file) + count 
:= 1 + if b != nil { + count = b.N + b.ResetTimer() + } + for n := 0; n < count; n++ { + if testFile(tests, *pctx, pnodes) != true { + return fmt.Errorf("test failed !") + } + } + return nil +} + +//prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. This is done here so that we don't redo it for each test +func prepTests() (*UnixParserCtx, EnricherCtx, error) { + var ( + err error + pctx *UnixParserCtx + ectx EnricherCtx + ) + + err = exprhelpers.Init(nil) + if err != nil { + log.Fatalf("exprhelpers init failed: %s", err) + } + + //Load enrichment + datadir := "./test_data/" + ectx, err = Loadplugin(datadir) + if err != nil { + log.Fatalf("failed to load plugin geoip : %v", err) + } + log.Printf("Loaded -> %+v", ectx) + + //Load the parser patterns + cfgdir := "../../config/" + + /* this should be refactored to 2 lines :p */ + // Init the parser + pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) + if err != nil { + return nil, ectx, fmt.Errorf("failed to initialize parser : %v", err) + } + return pctx, ectx, nil +} + +func loadTestFile(file string) []TestFile { + yamlFile, err := os.Open(file) + if err != nil { + log.Fatalf("yamlFile.Get err #%v ", err) + } + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + var testSet []TestFile + for { + tf := TestFile{} + err := dec.Decode(&tf) + if err != nil { + if err == io.EOF { + break + } + log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + return nil + } + testSet = append(testSet, tf) + } + return testSet +} + +func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bool) { + var retInfo []string + var valid bool = false + expectMaps := []map[string]string{expected.Parsed, expected.Meta, expected.Enriched} + outMaps := []map[string]string{out.Parsed, out.Meta, out.Enriched} + outLabels := []string{"Parsed", "Meta", "Enriched"} + + //allow to check as well for stage and processed flags + if expected.Stage != "" { + if expected.Stage != out.Stage { + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch stage %s != %s", expected.Stage, out.Stage)) + } + goto checkFinished + } else { + valid = true + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok stage %s == %s", expected.Stage, out.Stage)) + } + } + } + + if expected.Process != out.Process { + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch process %t != %t", expected.Process, out.Process)) + } + goto checkFinished + } else { + valid = true + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok process %t == %t", expected.Process, out.Process)) + } + } + + if expected.Whitelisted != out.Whitelisted { + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch whitelist %t != %t", expected.Whitelisted, out.Whitelisted)) + } + goto checkFinished + } else { + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok whitelist %t == %t", expected.Whitelisted, out.Whitelisted)) + } + valid = true + } + + for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { + for expKey, expVal := range expectMaps[mapIdx] { + if outVal, ok := outMaps[mapIdx][expKey]; ok { + if outVal == expVal { //ok entry + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + valid = true + } else { //mismatch entry + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + goto checkFinished + } 
+ } else { //missing entry + if debug { + retInfo = append(retInfo, fmt.Sprintf("missing entry %s[%s]", outLabels[mapIdx], expKey)) + } + valid = false + goto checkFinished + } + } + } +checkFinished: + if valid { + if debug { + retInfo = append(retInfo, fmt.Sprintf("OK ! %s", strings.Join(retInfo, "/"))) + } + } else { + if debug { + retInfo = append(retInfo, fmt.Sprintf("KO ! %s", strings.Join(retInfo, "/"))) + } + } + return retInfo, valid +} + +func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { + var results []types.Event + + for _, in := range testSet.Lines { + out, err := Parse(pctx, in, nodes) + if err != nil { + log.Errorf("Failed to process %s : %v", spew.Sdump(in), err) + } + //log.Infof("Parser output : %s", spew.Sdump(out)) + results = append(results, out) + } + log.Infof("parsed %d lines", len(testSet.Lines)) + log.Infof("got %d results", len(results)) + + /* + check the results we got against the expected ones + only the keys of the expected part are checked against result + */ + if len(testSet.Results) == 0 && len(results) == 0 { + log.Fatal("No results, no tests, abort.") + return false, fmt.Errorf("no tests, no results") + } + +reCheck: + failinfo := []string{} + for ridx, result := range results { + for eidx, expected := range testSet.Results { + explain, match := matchEvent(expected, result, debug) + if match == true { + log.Infof("expected %d/%d matches result %d/%d", eidx, len(testSet.Results), ridx, len(results)) + if len(explain) > 0 { + log.Printf("-> %s", explain[len(explain)-1]) + } + //don't do this at home : delete current element from list and redo + results[len(results)-1], results[ridx] = results[ridx], results[len(results)-1] + results = results[:len(results)-1] + + testSet.Results[len(testSet.Results)-1], testSet.Results[eidx] = testSet.Results[eidx], testSet.Results[len(testSet.Results)-1] + testSet.Results = testSet.Results[:len(testSet.Results)-1] + + goto reCheck + } else { + failinfo = append(failinfo, explain...) 
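+				// keep the mismatch details: this result may still match one of the remaining expected entries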
+ } + } + } + if len(results) > 0 { + log.Printf("Errors : %s", strings.Join(failinfo, " / ")) + return false, fmt.Errorf("leftover results : %+v", results) + } + if len(testSet.Results) > 0 { + log.Printf("Errors : %s", strings.Join(failinfo, " / ")) + return false, fmt.Errorf("leftover expected results : %+v", testSet.Results) + } + return true, nil +} + +func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { + log.Warning("Going to process one test set") + for _, tf := range testSet { + //func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { + testOk, err := testSubSet(tf, pctx, nodes) + if err != nil { + log.Fatalf("test failed : %s", err) + } + if !testOk { + log.Fatalf("failed test : %+v", tf) + } + } + return true +} + +/*THIS IS ONLY PRESENT TO BE ABLE TO GENERATE DOCUMENTATION OF EXISTING PATTERNS*/ +type Pair struct { + Key string + Value string +} + +type PairList []Pair + +func (p PairList) Len() int { return len(p) } +func (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PairList) Less(i, j int) bool { return len(p[i].Value) < len(p[j].Value) } + +func TestGeneratePatternsDoc(t *testing.T) { + if os.Getenv("GO_WANT_TEST_DOC") != "1" { + return + } + + pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + if err != nil { + t.Fatalf("unable to load patterns : %s", err) + } + log.Infof("-> %s", spew.Sdump(pctx)) + /*don't judge me, we do it for the users*/ + p := make(PairList, len(pctx.Grok)) + + i := 0 + for key, val := range pctx.Grok { + p[i] = Pair{key, val} + p[i].Value = strings.ReplaceAll(p[i].Value, "{%{", "\\{\\%\\{") + i++ + } + sort.Sort(p) + + f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("failed to open : %s", err) + } + if _, err := f.WriteString("# Patterns documentation\n\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("You will find here a generated documentation of all the patterns loaded by crowdsec.\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("They are sorted by pattern length, and are meant to be used in parsers, in the form %{PATTERN_NAME}.\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("\n\n"); err != nil { + t.Fatal("failed to write to file") + } + for _, k := range p { + if _, err := f.WriteString(fmt.Sprintf("## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value)); err != nil { + t.Fatal("failed to write to file") + } + fmt.Printf("%v\t%v\n", k.Key, k.Value) + } + if _, err := f.WriteString("\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("# Documentation generation\n"); err != nil { + t.Fatal("failed to write to file") + } + if _, err := f.WriteString("This documentation is generated by `pkg/parser` : `GO_WANT_TEST_DOC=1 go test -run TestGeneratePatternsDoc`\n"); err != nil { + t.Fatal("failed to write to file") + } + f.Close() + +} diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go new file mode 100644 index 0000000..69e448b --- /dev/null +++ b/pkg/parser/runtime.go @@ -0,0 +1,353 @@ +package parser + +/* + This file contains + - the runtime parsing routines +*/ + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" + + "strconv" + + "github.com/mohae/deepcopy" + 
"github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "github.com/antonmedv/expr" +) + +/* ok, this is kinda experimental, I don't know how bad of an idea it is .. */ +func SetTargetByName(target string, value string, evt *types.Event) bool { + + if evt == nil { + return false + } + + //it's a hack, we do it for the user + target = strings.TrimPrefix(target, "evt.") + + log.Debugf("setting target %s to %s", target, value) + defer func() { + if r := recover(); r != nil { + log.Errorf("Runtime error while trying to set '%s': %+v", target, r) + return + } + }() + + iter := reflect.ValueOf(evt).Elem() + if (iter == reflect.Value{}) || iter.IsZero() { + log.Tracef("event is nill") + //event is nill + return false + } + for _, f := range strings.Split(target, ".") { + /* + ** According to current Event layout we only have to handle struct and map + */ + switch iter.Kind() { + case reflect.Map: + tmp := iter.MapIndex(reflect.ValueOf(f)) + /*if we're in a map and the field doesn't exist, the user wants to add it :) */ + if (tmp == reflect.Value{}) || tmp.IsZero() { + log.Debugf("map entry is zero in '%s'", target) + } + iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value)) + return true + case reflect.Struct: + tmp := iter.FieldByName(f) + if !tmp.IsValid() { + log.Debugf("'%s' is not a valid target because '%s' is not valid", target, f) + return false + } + if tmp.Kind() == reflect.Ptr { + tmp = reflect.Indirect(tmp) + } + iter = tmp + //nolint: gosimple + break + case reflect.Ptr: + tmp := iter.Elem() + iter = reflect.Indirect(tmp.FieldByName(f)) + default: + log.Errorf("unexpected type %s in '%s'", iter.Kind(), target) + return false + } + } + //now we should have the final member :) + if !iter.CanSet() { + log.Errorf("'%s' can't be set", target) + return false + } + if iter.Kind() != reflect.String { + log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target) + return false + } + iter.Set(reflect.ValueOf(value)) + return true +} + +func printStaticTarget(static types.ExtraField) string { + + if static.Method != "" { + return static.Method + } else if static.Parsed != "" { + return fmt.Sprintf(".Parsed[%s]", static.Parsed) + } else if static.Meta != "" { + return fmt.Sprintf(".Meta[%s]", static.Meta) + } else if static.Enriched != "" { + return fmt.Sprintf(".Enriched[%s]", static.Enriched) + } else if static.TargetByName != "" { + return static.TargetByName + } else { + return "?" 
+ } +} + +func (n *Node) ProcessStatics(statics []types.ExtraField, event *types.Event) error { + //we have a few cases : + //(meta||key) + (static||reference||expr) + var value string + clog := n.Logger + + cachedExprEnv := exprhelpers.GetExprEnv(map[string]interface{}{"evt": event}) + + for _, static := range statics { + value = "" + if static.Value != "" { + value = static.Value + } else if static.RunTimeValue != nil { + output, err := expr.Run(static.RunTimeValue, cachedExprEnv) + if err != nil { + clog.Warningf("failed to run RunTimeValue : %v", err) + continue + } + switch out := output.(type) { + case string: + value = out + case int: + value = strconv.Itoa(out) + case map[string]interface{}: + clog.Warnf("Expression returned a map, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string") + case []interface{}: + clog.Warnf("Expression returned a map, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string") + default: + clog.Errorf("unexpected return type for RunTimeValue : %T", output) + return errors.New("unexpected return type for RunTimeValue") + } + } + + if value == "" { + //allow ParseDate to have empty input + if static.Method != "ParseDate" { + clog.Debugf("Empty value for %s, skip.", printStaticTarget(static)) + continue + } + } + + if static.Method != "" { + processed := false + /*still way too hackish, but : inject all the results in enriched, and */ + if enricherPlugin, ok := n.EnrichFunctions.Registered[static.Method]; ok { + clog.Tracef("Found method '%s'", static.Method) + ret, err := enricherPlugin.EnrichFunc(value, event, enricherPlugin.Ctx, n.Logger) + if err != nil { + clog.Errorf("method '%s' returned an error : %v", static.Method, err) + } + processed = true + clog.Debugf("+ Method %s('%s') returned %d entries to merge in .Enriched\n", static.Method, value, len(ret)) + if len(ret) == 0 { + clog.Debugf("+ Method '%s' empty response on '%s'", static.Method, value) + } + for k, v := range ret { + clog.Debugf("\t.Enriched[%s] = '%s'\n", k, v) + event.Enriched[k] = v + } + } else { + clog.Debugf("method '%s' doesn't exist or plugin not initialized", static.Method) + } + if !processed { + clog.Debugf("method '%s' doesn't exist", static.Method) + } + } else if static.Parsed != "" { + clog.Debugf(".Parsed[%s] = '%s'", static.Parsed, value) + event.Parsed[static.Parsed] = value + } else if static.Meta != "" { + clog.Debugf(".Meta[%s] = '%s'", static.Meta, value) + event.Meta[static.Meta] = value + } else if static.Enriched != "" { + clog.Debugf(".Enriched[%s] = '%s'", static.Enriched, value) + event.Enriched[static.Enriched] = value + } else if static.TargetByName != "" { + if !SetTargetByName(static.TargetByName, value, event) { + clog.Errorf("Unable to set value of '%s'", static.TargetByName) + } else { + clog.Debugf("%s = '%s'", static.TargetByName, value) + } + } else { + clog.Fatal("unable to process static : unknown target") + } + + } + return nil +} + +var NodesHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_hits_total", + Help: "Total events entered node.", + }, + []string{"source", "type", "name"}, +) + +var NodesHitsOk = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_hits_ok_total", + Help: "Total events successfully exited node.", + }, + []string{"source", "type", "name"}, +) + +var NodesHitsKo = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: 
"cs_node_hits_ko_total", + Help: "Total events unsuccessfully exited node.", + }, + []string{"source", "type", "name"}, +) + +func stageidx(stage string, stages []string) int { + for i, v := range stages { + if stage == v { + return i + } + } + return -1 +} + +type ParserResult struct { + Evt types.Event + Success bool +} + +var ParseDump bool +var DumpFolder string +var StageParseCache map[string]map[string][]ParserResult + +func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { + var event types.Event = xp + + /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ + if event.Stage == "" && len(ctx.Stages) > 0 { + event.Stage = ctx.Stages[0] + log.Tracef("no stage, set to : %s", event.Stage) + } + event.Process = false + if event.Time.IsZero() { + event.Time = time.Now().UTC() + } + + if event.Parsed == nil { + event.Parsed = make(map[string]string) + } + if event.Enriched == nil { + event.Enriched = make(map[string]string) + } + if event.Meta == nil { + event.Meta = make(map[string]string) + } + if event.Type == types.LOG { + log.Tracef("INPUT '%s'", event.Line.Raw) + } + + cachedExprEnv := exprhelpers.GetExprEnv(map[string]interface{}{"evt": &event}) + + if ParseDump { + if StageParseCache == nil { + StageParseCache = make(map[string]map[string][]ParserResult) + StageParseCache["success"] = make(map[string][]ParserResult) + StageParseCache["success"][""] = make([]ParserResult, 0) + } + } + + for _, stage := range ctx.Stages { + if ParseDump { + if _, ok := StageParseCache[stage]; !ok { + StageParseCache[stage] = make(map[string][]ParserResult) + } + } + /* if the node is forward in stages, seek to this stage */ + /* this is for example used by testing system to inject logs in post-syslog-parsing phase*/ + if stageidx(event.Stage, ctx.Stages) > stageidx(stage, ctx.Stages) { + log.Tracef("skipping stage, we are already at [%s] expecting [%s]", event.Stage, stage) + continue + } + log.Tracef("node stage : %s, current stage : %s", event.Stage, stage) + + /* if the stage is wrong, it means that the log didn't manage "pass" a stage with a onsuccess: next_stage tag */ + if event.Stage != stage { + log.Debugf("Event not parsed, expected stage '%s' got '%s', abort", stage, event.Stage) + event.Process = false + return event, nil + } + + isStageOK := false + for idx, node := range nodes { + //Only process current stage's nodes + if event.Stage != node.Stage { + continue + } + clog := log.WithFields(log.Fields{ + "node-name": node.rn, + "stage": event.Stage, + }) + clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn) + if ctx.Profiling { + node.Profiling = true + } + ret, err := node.process(&event, ctx, cachedExprEnv) + if err != nil { + clog.Errorf("Error while processing node : %v", err) + return event, err + } + clog.Tracef("node (%s) ret : %v", node.rn, ret) + if ParseDump { + if len(StageParseCache[stage][node.Name]) == 0 { + StageParseCache[stage][node.Name] = make([]ParserResult, 0) + } + evtcopy := deepcopy.Copy(event) + parserInfo := ParserResult{Evt: evtcopy.(types.Event), Success: ret} + StageParseCache[stage][node.Name] = append(StageParseCache[stage][node.Name], parserInfo) + } + if ret { + isStageOK = true + } + if ret && node.OnSuccess == "next_stage" { + clog.Debugf("node successful, stop end stage %s", stage) + break + } + //the parsed object moved onto the next phase + if event.Stage != stage { + clog.Tracef("node moved stage, break and redo") + break + } + } + if !isStageOK { + log.Debugf("Log didn't 
finish stage %s", event.Stage) + event.Process = false + return event, nil + } + + } + + event.Process = true + return event, nil + +} diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go new file mode 100644 index 0000000..3bc7bdd --- /dev/null +++ b/pkg/parser/stage.go @@ -0,0 +1,135 @@ +package parser + +/* + This file contains + - the runtime definition of parser + - the compilation/parsing routines of parser configuration +*/ + +import ( + //"fmt" + + "fmt" + "io" + _ "net/http/pprof" + "os" + "sort" + "strings" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + + log "github.com/sirupsen/logrus" + + "github.com/goombaio/namegenerator" + yaml "gopkg.in/yaml.v2" +) + +var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) + +/* + identify generic component to alter maps, smartfilters ? (static, conditional static etc.) +*/ + +type Stagefile struct { + Filename string `yaml:"filename"` + Stage string `yaml:"stage"` +} + +func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ([]Node, error) { + var nodes []Node + tmpstages := make(map[string]bool) + pctx.Stages = []string{} + + for _, stageFile := range stageFiles { + if !strings.HasSuffix(stageFile.Filename, ".yaml") && !strings.HasSuffix(stageFile.Filename, ".yml") { + log.Warningf("skip non yaml : %s", stageFile.Filename) + continue + } + log.Debugf("loading parser file '%s'", stageFile) + st, err := os.Stat(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err) + } + if st.IsDir() { + continue + } + yamlFile, err := os.Open(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("can't access parsing configuration file %s : %s", stageFile.Filename, err) + } + //process the yaml + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + nodesCount := 0 + for { + node := Node{} + node.OnSuccess = "continue" //default behavior is to continue + err = dec.Decode(&node) + if err != nil { + if err == io.EOF { + log.Tracef("End of yaml file") + break + } + log.Fatalf("Error decoding parsing configuration file '%s': %v", stageFile.Filename, err) + } + + //check for empty bucket + if node.Name == "" && node.Description == "" && node.Author == "" { + log.Infof("Node in %s has no name,author or description. 
Skipping.", stageFile.Filename) + continue + } + //check compat + if node.FormatVersion == "" { + log.Tracef("no version in %s, assuming '1.0'", node.Name) + node.FormatVersion = "1.0" + } + ok, err := cwversion.Statisfies(node.FormatVersion, cwversion.Constraint_parser) + if err != nil { + log.Fatalf("Failed to check version : %s", err) + } + if !ok { + log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, cwversion.Constraint_parser) + continue + } + + node.Stage = stageFile.Stage + if _, ok := tmpstages[stageFile.Stage]; !ok { + tmpstages[stageFile.Stage] = true + } + //compile the node : grok pattern and expression + err = node.compile(pctx, ectx) + if err != nil { + if node.Name != "" { + return nil, fmt.Errorf("failed to compile node '%s' in '%s' : %s", node.Name, stageFile.Filename, err) + } + return nil, fmt.Errorf("failed to compile node in '%s' : %s", stageFile.Filename, err) + } + /* if the stage is empty, the node is empty, it's a trailing entry in users yaml file */ + if node.Stage == "" { + continue + } + + if len(node.Data) > 0 { + for _, data := range node.Data { + err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) + if err != nil { + log.Errorf(err.Error()) + } + } + } + nodes = append(nodes, node) + nodesCount++ + } + log.WithFields(log.Fields{"file": stageFile.Filename, "stage": stageFile.Stage}).Infof("Loaded %d parser nodes", nodesCount) + } + + for k := range tmpstages { + pctx.Stages = append(pctx.Stages, k) + } + sort.Strings(pctx.Stages) + log.Infof("Loaded %d nodes from %d stages", len(nodes), len(pctx.Stages)) + + return nodes, nil +} diff --git a/pkg/parser/test_data/GeoLite2-ASN.mmdb b/pkg/parser/test_data/GeoLite2-ASN.mmdb new file mode 100644 index 0000000000000000000000000000000000000000..bbf6bf2609008fbdbf299620081c68149a5f5be8 GIT binary patch literal 3168 zcmZYA2Y3@@7{KxON*S{EDurXOQlRW1dv#K1filB2xt6p^E?h2!7G&ezihF=taqq2E zapT^bI#Ch#7X9x2HL2RCPk(vW_kDMlJ}07yXd6XR6gx#4DW;305;cY<&=i^}juBD6 z?m%;B0WF~ww1zg&7TQ63=l~s|6Lf|y&=tBtcSwUCkPbbe7wFI%`aoak2mN6H41_^2 z7>2-5$beyx3BzFojD##01*2gMjD>M99wxvheB8&BEN|&Br1YpSOkk<36#K6SO&{s1+0Ws;Dgm*sQpeBDOGF| z@skXIsgjiV(vYFDV}Q;{nbFN$16@@lw7C7%|#R`GzybtJEc8(=To2sgpaa0}cDx54dj z2i&QazF*`nqPyW9xEJp8XmGFHj}Io2@(_38VUb4|cogDug>q*oLbm$FzpfB`;{xARr0`EU< zFwqbg3K=jAGGRE3fRT^|qhK_Qfw3?S#=``d2$Ntk@cz@L5KVE#|---@o=eW*QV}Ysr|ob+2uwgSYtRrD>A>>48Gh}S=_T{xwjJ~v5mTQX zGX0MFoZ&U>J#Zx7Sf3w^1Ty_r_|WpwV8pPiePxy%HXL8rsIY8bm1)P+iT0%Yw;GHj ztGjbc*&1BPRi8Zp!7R7cIcxu%sDbj++_(~Amn17^%`2czzl z9-LuDRu28As;jDAY{ndYR{gX-sLZG|b=8nvn5DaGX6WjCy1Cv6M?M9Skvxf#<^)h;;^D`5KkRy^X=YCCgn({I@UUo2Q-?&bvD0OT+Z$&d^rGhrqp1dk1gAormh z5fljbArcPd4(O<>>osv#U0vN(a}jmdV|88c@84DZGBbYPH~h%IySnSW>gwvM>h33m zO$hrkAvC~RA#@>ZVId5z*%{7a=*Dn1x-1sr9Ika|IG5o(hVvONV7QRsB8DCeJsEm2 zT+Gm$p%23)43{!o#?Y7Ha)v7yu4L%P(4WC%NMIPiFpyyo!&MA}8HO-87=|)j&2SCF zFoxj_*D{P?xQ^j^hLH@T7)CSPz;GkOO$;|P+`@1x!)*+=Gu**&C&OI~V;B+{k{FU1 zQW#Ph(iof!=?pH042DdGEQV}`9EPzBxeRw>`X3Y`k8Al11q_7@;~2&>OklW&VIspM zhRF<5817{#Vz`gNErfVih^c_NLKHKxguw&i!$M5sS}AH}WW~~wy-b`=APHgywTW_u z3I-p8Ux>4Z-#H9UB2U|{6~oIx$VCUKSaCy$X$LSIR7Rgeu2-zYWx)d zKk^$OS;jqK8!l`>h-K3lbPxv%n*q2~*z8O^i=i9C*$n3}bZ0o1;XH=(87^SBkl`YR z9(eR6!q$^(y%;WL=*`fFKq?FW71%E8s@eJy(RR78UBL`&-nM>R>mTiDa?b=|8^CP? 
zqpN<~wzel5f4Ay)cjZS{TltX{n#N)3ebUBy{jB7diN(3!ErP}qRrZRL+Nsw1b#j>n z{{Vt30%sBOgbOJIvj4^L8J&DY)CsS^(%2te$zwa~Pqj6!!e)RkIbzHuCAvDg9F`?| zffq4AM%9Z?#2U-s4v3zmt}UHeUWhJ_t$fZbk1FCOB)nIaA$}@#fYPxu#-fJg*dNDQ zi=+2O*kuOpMUL<(2ZBcD@nurjJ+VpXn9sVDoT!Q_!X@p8D*r)%<)u}aaCq;hzK$(r3IPmt_}`Q!kH{>mtiX$%WJF!nUm`d?+xOj zHp%Us?GHl!0_^OMAo-N%ib&mLA}UX1)6U3zCT`;}hMbVJ_DiS)cVs!0#0q)pjuTOg zrBDA@ollb6;7scvDKj(+msA*nqt7qW!0{J$lIfp?{b3bk#?}{Sgk5(T3$Q;#7rFad zhjdFGJ{)bXg_=8dti;hn$3niQiC|Jq|L%3n8 z*WL9DWS2&y0g<4NuGO$!tsHX&r6LD7o{eok@C3$+wa~tJAud>BR)OW-F)3+k{K~fS zyw&a!xy!#~zOpU3e2ad{!l3iIqOS)R2I`JU7HRQbT93h{;Jgur{8)~+0iJd41| z_WhJuWI=XmHv3u3thR;?Sgak#55>o^w6eGF{m4F48qVm@^LIreWl=XN;|lKSNOEN_&HLnV2ndMN6T(mIZ9Z(qATW&-XaOQZBPNfp(>YGMWM zy)drJ24Y-v6`m$1AaFJ37HNso)a?P4=-R*;o(m#{R;O&{$chPoE#Wf(xcfa=5q+hpu8-#`J1A_kyT@p#Kuj#r4;;iSGaMt@12f0_eoz@ zEhm2h3covP4$sA!kk=Q~Nm}`o^ED zAmFX^m&@I7-}{u+%QSoV%}bR>b*bDY&7}_Dd`?#Ydtv%WqA+G>gWC)4BJBu$WEVMh z975t71#Bc>NNRtNHDPs?2c>fNqa3M9M>`hmmq9Gvo7S-CNJmX7VH8&QN|A@z(sVAL z8M7qL{;nLeny7Za*@;0cP`9^4&ZG=t8HqSl1YtVu1~XNynvdzTxEkJ9BAa=mhhg9g zA&SZJtD7S&b@J3AW)ERKV|tiwPR7xP7`)!)!Tup!J{v5DW@)61 zv4%Ltd76v$E;_$Xa?ijw6OJoxj2WKc`$Y~<&ClfMM@8&{$!t?A5ZK{pJ-!0<@ql!} zrXtH&IG)S+h=9#4=3|0_@Iqn_(gP6!)mH_$KUe13xqS^TcVTVTxhp?Sev3XED#>UJe$}h(_tnk4VB^T-4zU894e{8CxfG8Ty&ItMDB| zCq9^2eJb9{W9~;XNq4XhzKVi%xTS?(**ivi+MfARd)e$;(h~y}9_22bTqe)nZzhac zHimbh6?VX5hYjGPivN|Dp&oJ8b+(p_58trE#Qh1h-Tz!AwTzWOw3tQ+^2Ka(br*+~v3shwKh>IhK(8iwi6KC2t46fQ>C&9rj8%kCg!k zP4X`PaTvG!9-%b>6t4F)`bCq)u2(`d^PxR&e3|W@jWfTJ zYm5CAos;kZ0}efk@C{^zyQ-+dJ>4HDy5AF!U%54Czi-92gk7zhGN*>Tw8m{;sL27al literal 0 HcmV?d00001 diff --git a/pkg/parser/tests/base-grok-expression/base-grok.yaml b/pkg/parser/tests/base-grok-expression/base-grok.yaml new file mode 100644 index 0000000..ed2616c --- /dev/null +++ b/pkg/parser/tests/base-grok-expression/base-grok.yaml @@ -0,0 +1,13 @@ +filter: "evt.Line.Labels.type == 'testlog'" +debug: true +onsuccess: next_stage +name: tests/base-grok +pattern_syntax: + MYCAP1: ".*" +nodes: + - grok: + pattern: ^xxheader %{MYCAP1:extracted_value} trailing stuff$ + expression: evt.Line.Raw +statics: + - meta: log_type + value: parsed_testlog diff --git a/pkg/parser/tests/base-grok-expression/parsers.yaml b/pkg/parser/tests/base-grok-expression/parsers.yaml new file mode 100644 index 0000000..775f889 --- /dev/null +++ b/pkg/parser/tests/base-grok-expression/parsers.yaml @@ -0,0 +1,2 @@ + - filename: {{.TestDirectory}}/base-grok.yaml + stage: s00-raw diff --git a/pkg/parser/tests/base-grok-expression/test.yaml b/pkg/parser/tests/base-grok-expression/test.yaml new file mode 100644 index 0000000..679e9e0 --- /dev/null +++ b/pkg/parser/tests/base-grok-expression/test.yaml @@ -0,0 +1,28 @@ +#these are the events we input into parser +lines: + - Line: + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE1 trailing stuff + - Line: + #see tricky case : first one is nginx via syslog, the second one is local nginx :) + Labels: + #this one will be checked by a filter + type: testlog + Raw: xxheader VALUE2 trailing stuff +#these are the results we expect from the parser +results: + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE1 + Process: true + Stage: s00-raw + - Meta: + log_type: parsed_testlog + Parsed: + extracted_value: VALUE2 + Process: true + Stage: s00-raw + diff --git a/pkg/parser/tests/base-grok-external-data/base-grok.yaml b/pkg/parser/tests/base-grok-external-data/base-grok.yaml new file mode 100644 index 0000000..a484ab0 --- /dev/null +++ 
diff --git a/pkg/parser/tests/base-grok-external-data/base-grok.yaml b/pkg/parser/tests/base-grok-external-data/base-grok.yaml
new file mode 100644
index 0000000..a484ab0
--- /dev/null
+++ b/pkg/parser/tests/base-grok-external-data/base-grok.yaml
@@ -0,0 +1,23 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+data:
+  - source_url: https://invalid.com/test.list
+    dest_file: ./sample_strings.txt
+    type: string
+
+pattern_syntax:
+  MYCAP_EXT: ".*"
+nodes:
+  - grok:
+      pattern: ^xxheader %{MYCAP_EXT:extracted_value} trailing stuff$
+      apply_on: Line.Raw
+statics:
+  - meta: log_type
+    value: parsed_testlog
+  - meta: is_it_in_file
+    expression: |-
+      evt.Parsed.extracted_value in File("./sample_strings.txt") ? "true" : "false"
+
+
diff --git a/pkg/parser/tests/base-grok-external-data/parsers.yaml b/pkg/parser/tests/base-grok-external-data/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/base-grok-external-data/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/base-grok-external-data/test.yaml b/pkg/parser/tests/base-grok-external-data/test.yaml
new file mode 100644
index 0000000..b208711
--- /dev/null
+++ b/pkg/parser/tests/base-grok-external-data/test.yaml
@@ -0,0 +1,32 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE1 trailing stuff
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE2 trailing stuff
+#these are the results we expect from the parser
+results:
+
+  - Meta:
+      log_type: parsed_testlog
+      is_it_in_file: true
+    Parsed:
+      extracted_value: VALUE1
+
+    Process: true
+    Stage: s00-raw
+  - Meta:
+      log_type: parsed_testlog
+      is_it_in_file: false
+    Parsed:
+      extracted_value: VALUE2
+    Process: true
+    Stage: s00-raw
+
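For context, a minimal sketch of what the `data:` section above sets up. `LoadStages` (in stage.go) calls `exprhelpers.FileInit` for each entry, caching the file's lines in memory, and the `File()` expr helper then serves them for `in` membership tests; the folder and file names below are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)

func main() {
	// Load ./data/sample_strings.txt into the in-memory cache, keyed by filename.
	if err := exprhelpers.FileInit("./data", "sample_strings.txt", "string"); err != nil {
		panic(err)
	}
	// Inside a node this lookup is written as:
	//   evt.Parsed.extracted_value in File("sample_strings.txt") ? "true" : "false"
	fmt.Println(exprhelpers.File("sample_strings.txt"))
}
```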
diff --git a/pkg/parser/tests/base-grok-import/base-grok.yaml b/pkg/parser/tests/base-grok-import/base-grok.yaml
new file mode 100644
index 0000000..0d451d0
--- /dev/null
+++ b/pkg/parser/tests/base-grok-import/base-grok.yaml
@@ -0,0 +1,16 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+nodes:
+  - grok:
+      #SYSLOGFACILITY is a pattern shipped with the grokky library we are using
+      name: SYSLOGFACILITY
+      apply_on: Line.Raw
+    statics:
+      - enriched: subgrok_static_why_is_it_still_here
+        value: because
+statics:
+  - meta: log_type
+    value: parsed_testlog
+
diff --git a/pkg/parser/tests/base-grok-import/parsers.yaml b/pkg/parser/tests/base-grok-import/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/base-grok-import/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/base-grok-import/test.yaml b/pkg/parser/tests/base-grok-import/test.yaml
new file mode 100644
index 0000000..4ce26fd
--- /dev/null
+++ b/pkg/parser/tests/base-grok-import/test.yaml
@@ -0,0 +1,43 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: <123.120>
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: <123.121>
+  - Line:
+      #third sample line: this one won't match the grok pattern
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: XXXX
+#these are the results we expect from the parser
+results:
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      facility: 123
+      priority: 120
+    Enriched:
+      subgrok_static_why_is_it_still_here: because
+    Process: true
+    Stage: s00-raw
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      facility: 123
+      priority: 121
+    Enriched:
+      subgrok_static_why_is_it_still_here: because
+    Process: true
+    Stage: s00-raw
+  - Process: false
+    Stage: s00-raw
+    Line:
+      Raw: XXXX
diff --git a/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml b/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml
new file mode 100644
index 0000000..4589ac9
--- /dev/null
+++ b/pkg/parser/tests/base-grok-no-subnode/base-grok.yaml
@@ -0,0 +1,13 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+pattern_syntax:
+  MYCAP2: ".*"
+grok:
+  pattern: ^xxheader %{MYCAP2:extracted_value} trailing stuff$
+  apply_on: Line.Raw
+statics:
+  - meta: log_type
+    value: parsed_testlog
+
diff --git a/pkg/parser/tests/base-grok-no-subnode/parsers.yaml b/pkg/parser/tests/base-grok-no-subnode/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/base-grok-no-subnode/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/base-grok-no-subnode/test.yaml b/pkg/parser/tests/base-grok-no-subnode/test.yaml
new file mode 100644
index 0000000..bc83002
--- /dev/null
+++ b/pkg/parser/tests/base-grok-no-subnode/test.yaml
@@ -0,0 +1,29 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE1 trailing stuff
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE2 trailing stuff
+#these are the results we expect from the parser
+results:
+
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE1
+    Process: true
+    Stage: s00-raw
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE2
+    Process: true
+    Stage: s00-raw
+
diff --git a/pkg/parser/tests/base-grok/base-grok.yaml b/pkg/parser/tests/base-grok/base-grok.yaml
new file mode 100644
index 0000000..7811c65
--- /dev/null
+++ b/pkg/parser/tests/base-grok/base-grok.yaml
@@ -0,0 +1,14 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+pattern_syntax:
+  MYCAP1: ".*"
+nodes:
+  - grok:
+      pattern: ^xxheader %{MYCAP1:extracted_value} trailing stuff$
+      apply_on: Line.Raw
+statics:
+  - meta: log_type
+    value: parsed_testlog
+
diff --git a/pkg/parser/tests/base-grok/parsers.yaml b/pkg/parser/tests/base-grok/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/base-grok/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/base-grok/test.yaml b/pkg/parser/tests/base-grok/test.yaml
new file mode 100644
index 0000000..bc83002
--- /dev/null
+++ b/pkg/parser/tests/base-grok/test.yaml
@@ -0,0 +1,29 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE1 trailing stuff
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE2 trailing stuff
+#these are the results we expect from the parser
+results:
+
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE1
+    Process: true
+    Stage: s00-raw
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE2
+    Process: true
+    Stage: s00-raw
+
diff --git a/pkg/parser/tests/base-json-extract/base-grok.yaml b/pkg/parser/tests/base-json-extract/base-grok.yaml
new file mode 100644
index 0000000..53d274c
--- /dev/null
+++ b/pkg/parser/tests/base-json-extract/base-grok.yaml
@@ -0,0 +1,17 @@
+filter: "evt.Line.Labels.type == 'json-1'"
+debug: true
+onsuccess: next_stage
+name: tests/base-json-extract
+statics:
+  - parsed: message
+    expression: JsonExtract(evt.Line.Raw, "log")
+  - meta: other_field
+    expression: JsonExtract(evt.Line.Raw, "testfield")
+  - meta: program
+    expression: evt.Line.Labels.progrname
+  - parsed: extracted_array
+    expression: JsonExtract(evt.Line.Raw, "nested_1.anarray")
+  - parsed: extracted_array_field
+    expression: JsonExtract(evt.Line.Raw, "nested_1.anarray[0]")
+
+
diff --git a/pkg/parser/tests/base-json-extract/base-grok2.yaml b/pkg/parser/tests/base-json-extract/base-grok2.yaml
new file mode 100644
index 0000000..64b971b
--- /dev/null
+++ b/pkg/parser/tests/base-json-extract/base-grok2.yaml
@@ -0,0 +1,16 @@
+filter: "evt.Meta.program == 'my_test_prog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+pattern_syntax:
+  MYCAP3: ".*"
+nodes:
+  - grok:
+      pattern: ^xxheader %{MYCAP3:extracted_value} trailing stuff$
+      apply_on: message
+statics:
+  - meta: log_type
+    value: parsed_testlog
+  - parsed: extracted_arrayfield_from_object
+    expression: JsonExtract(evt.Parsed.extracted_array, '[1]')
+
diff --git a/pkg/parser/tests/base-json-extract/parsers.yaml b/pkg/parser/tests/base-json-extract/parsers.yaml
new file mode 100644
index 0000000..32760f9
--- /dev/null
+++ b/pkg/parser/tests/base-json-extract/parsers.yaml
@@ -0,0 +1,4 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
+ - filename: {{.TestDirectory}}/base-grok2.yaml
+   stage: s01-parse
diff --git a/pkg/parser/tests/base-json-extract/test.yaml b/pkg/parser/tests/base-json-extract/test.yaml
new file mode 100644
index 0000000..b1e0e5a
--- /dev/null
+++ b/pkg/parser/tests/base-json-extract/test.yaml
@@ -0,0 +1,19 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        type: json-1
+        progrname: my_test_prog
+      Raw: '{"testfield": "some stuff", "log": "xxheader VALUE1 trailing stuff", "nested_1" : {"anarray" : ["foo","bar","xx1"], "xxx" : "zzzz"}}'
+results:
+  - Meta:
+      other_field: some stuff
+      program: my_test_prog
+    Parsed:
+      message: xxheader VALUE1 trailing stuff
+      extracted_value: VALUE1
+      extracted_array_field: foo
+      extracted_array: '["foo","bar","xx1"]'
+      extracted_arrayfield_from_object: bar
+    Process: true
+
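A sketch of the `JsonExtract` helper these statics rely on (assuming the exported Go function in `pkg/exprhelpers` matches the expr-side name used above). Note the array case: extracting `nested_1.anarray` yields the array's JSON text, which can be fed back into `JsonExtract`, which is how `extracted_arrayfield_from_object` ends up as `bar`:

```go
package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/exprhelpers"
)

func main() {
	raw := `{"log": "xxheader VALUE1 trailing stuff", "nested_1": {"anarray": ["foo","bar","xx1"]}}`
	fmt.Println(exprhelpers.JsonExtract(raw, "log"))                 // xxheader VALUE1 trailing stuff
	fmt.Println(exprhelpers.JsonExtract(raw, "nested_1.anarray"))    // ["foo","bar","xx1"]
	fmt.Println(exprhelpers.JsonExtract(raw, "nested_1.anarray[0]")) // foo
}
```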
diff --git a/pkg/parser/tests/base-tree/base-grok.yaml b/pkg/parser/tests/base-tree/base-grok.yaml
new file mode 100644
index 0000000..ecd7790
--- /dev/null
+++ b/pkg/parser/tests/base-tree/base-grok.yaml
@@ -0,0 +1,33 @@
+#Here we test trees of nodes: a root node followed by leaf sub-nodes
+filter: "evt.Line.Labels.type == 'type1'"
+debug: true
+name: tests/base-grok-root
+pattern_syntax:
+  MYCAP4: ".*"
+grok:
+  pattern: ^xxheader %{MYCAP4:extracted_value} trailing stuff$
+  apply_on: Line.Raw
+statics:
+  - meta: state
+    value: root-done
+  - meta: state_sub
+    expression: evt.Parsed.extracted_value
+---
+filter: "evt.Line.Labels.type == 'type1' && evt.Meta.state == 'root-done'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok-leafs
+#the sub-nodes will process the result of the master node
+nodes:
+  - filter: "evt.Parsed.extracted_value == 'VALUE1'"
+    debug: true
+    statics:
+      - meta: final_state
+        value: leaf1
+  - filter: "evt.Parsed.extracted_value == 'VALUE2'"
+    debug: true
+    statics:
+      - meta: final_state
+        value: leaf2
+
+
diff --git a/pkg/parser/tests/base-tree/parsers.yaml b/pkg/parser/tests/base-tree/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/base-tree/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/base-tree/test.yaml b/pkg/parser/tests/base-tree/test.yaml
new file mode 100644
index 0000000..2650e81
--- /dev/null
+++ b/pkg/parser/tests/base-tree/test.yaml
@@ -0,0 +1,30 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: type1
+      Raw: xxheader VALUE1 trailing stuff
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: type1
+      Raw: xxheader VALUE2 trailing stuff
+#these are the results we expect from the parser
+results:
+  - Meta:
+      final_state: leaf1
+      state_sub: VALUE1
+    Parsed:
+      extracted_value: VALUE1
+    Process: true
+    Stage: s00-raw
+  - Meta:
+      final_state: leaf2
+      state_sub: VALUE2
+    Parsed:
+      extracted_value: VALUE2
+    Process: true
+    Stage: s00-raw
+
diff --git a/pkg/parser/tests/dateparser-enrich/base-grok.yaml b/pkg/parser/tests/dateparser-enrich/base-grok.yaml
new file mode 100644
index 0000000..781b4a6
--- /dev/null
+++ b/pkg/parser/tests/dateparser-enrich/base-grok.yaml
@@ -0,0 +1,10 @@
+filter: "evt.StrTime != ''"
+name: test/dateparse
+debug: true
+#statics-only node: ParseDate fills Enriched.MarshaledTime, then we copy it onto the event
+statics:
+  - method: ParseDate
+    expression: evt.StrTime
+  - target: MarshaledTime
+    expression: evt.Enriched.MarshaledTime
+
diff --git a/pkg/parser/tests/dateparser-enrich/parsers.yaml b/pkg/parser/tests/dateparser-enrich/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/dateparser-enrich/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/dateparser-enrich/test.yaml b/pkg/parser/tests/dateparser-enrich/test.yaml
new file mode 100644
index 0000000..67edd9a
--- /dev/null
+++ b/pkg/parser/tests/dateparser-enrich/test.yaml
@@ -0,0 +1,22 @@
+#these are the events we feed into the parser
+lines:
+  - StrTime: 2012/11/01
+    Parsed:
+      test: format1
+  - StrTime: 11/02/2012 13:37:05
+    Parsed:
+      test: format2
+#these are the results we expect from the parser
+results:
+  - Parsed:
+      test: format1
+    Enriched:
+      MarshaledTime: "2012-11-01T00:00:00Z"
+    Process: true
+    Stage: s00-raw
+  - Parsed:
+      test: format2
+    Enriched:
+      MarshaledTime: "2012-11-02T13:37:05Z"
+    Process: true
+    Stage: s00-raw
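The dateparser fixture shows the two static flavors cooperating: `method: ParseDate` invokes a registered enricher on `evt.StrTime`, its output lands in `evt.Enriched` (here `MarshaledTime`, normalized to RFC3339), and the `target:` static then copies that value onto the event's own field, hence the timestamps expected in `test.yaml`.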
diff --git a/pkg/parser/tests/geoip-enrich/base-grok.yaml b/pkg/parser/tests/geoip-enrich/base-grok.yaml
new file mode 100644
index 0000000..a25875c
--- /dev/null
+++ b/pkg/parser/tests/geoip-enrich/base-grok.yaml
@@ -0,0 +1,22 @@
+filter: "'source_ip' in evt.Meta"
+name: tests/geoip-enrich
+description: "Populate event with geoloc info: AS, country, coords, source range."
+statics:
+  - method: GeoIpCity
+    expression: evt.Meta.source_ip
+  - meta: IsoCode
+    expression: evt.Enriched.IsoCode
+  - meta: IsInEU
+    expression: evt.Enriched.IsInEU
+  - meta: GeoCoords
+    expression: evt.Enriched.GeoCoords
+  - method: GeoIpASN
+    expression: evt.Meta.source_ip
+  - meta: ASNNumber
+    expression: evt.Enriched.ASNNumber
+  - meta: ASNOrg
+    expression: evt.Enriched.ASNOrg
+  - method: IpToRange
+    expression: evt.Meta.source_ip
+  - meta: SourceRange
+    expression: evt.Enriched.SourceRange
diff --git a/pkg/parser/tests/geoip-enrich/parsers.yaml b/pkg/parser/tests/geoip-enrich/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/geoip-enrich/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/geoip-enrich/test.yaml b/pkg/parser/tests/geoip-enrich/test.yaml
new file mode 100644
index 0000000..5d0abcb
--- /dev/null
+++ b/pkg/parser/tests/geoip-enrich/test.yaml
@@ -0,0 +1,27 @@
+#these are the events we feed into the parser
+lines:
+  - Meta:
+      test: test1
+      source_ip: 8.8.8.8
+  - Meta:
+      test: test2
+      source_ip: 192.168.0.1
+#these are the results we expect from the parser
+results:
+  - Process: true
+    Enriched:
+      IsoCode: US
+      IsInEU: false
+      ASNOrg: Google LLC
+    Meta:
+      source_ip: 8.8.8.8
+  - Process: true
+    Enriched:
+      IsInEU: false
+      IsoCode:
+      ASNOrg:
+    Meta:
+      source_ip: 192.168.0.1
+
+
+
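Each `method:` static above invokes one enricher (`GeoIpCity`, `GeoIpASN`, `IpToRange`) on `evt.Meta.source_ip`, and the `meta:` statics that follow copy the resulting `evt.Enriched` values into `evt.Meta`. The second sample, 192.168.0.1, is private address space with no entry in the bundled GeoLite2 test databases, which is why the expected `IsoCode` and `ASNOrg` are empty.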
diff --git a/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml b/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml
new file mode 100644
index 0000000..0425f90
--- /dev/null
+++ b/pkg/parser/tests/multi-stage-grok/base-grok-s00.yaml
@@ -0,0 +1,12 @@
+filter: "evt.Line.Labels.type == 'testlog'"
+debug: true
+onsuccess: next_stage
+name: tests/base-grok
+nodes:
+  - grok:
+      pattern: ^xxheader %{GREEDYDATA:extracted_value} trailing stuff$
+      apply_on: Line.Raw
+statics:
+  - meta: log_type
+    value: parsed_testlog
+
diff --git a/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml b/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml
new file mode 100644
index 0000000..1e06a85
--- /dev/null
+++ b/pkg/parser/tests/multi-stage-grok/base-grok-s01.yaml
@@ -0,0 +1,11 @@
+#only one of the two events is going to get through this filter
+filter: "evt.Parsed.extracted_value == 'VALUE1'"
+debug: true
+onsuccess: next_stage
+name: tests/second-stage-grok
+statics:
+  - meta: did_second_stage
+    value: yes
+  - target: evt.Parsed.test_bis
+    value: lolilol
+
diff --git a/pkg/parser/tests/multi-stage-grok/parsers.yaml b/pkg/parser/tests/multi-stage-grok/parsers.yaml
new file mode 100644
index 0000000..c0e4c74
--- /dev/null
+++ b/pkg/parser/tests/multi-stage-grok/parsers.yaml
@@ -0,0 +1,4 @@
+ - filename: {{.TestDirectory}}/base-grok-s00.yaml
+   stage: s00-raw
+ - filename: {{.TestDirectory}}/base-grok-s01.yaml
+   stage: s01-raw
diff --git a/pkg/parser/tests/multi-stage-grok/test.yaml b/pkg/parser/tests/multi-stage-grok/test.yaml
new file mode 100644
index 0000000..2113aff
--- /dev/null
+++ b/pkg/parser/tests/multi-stage-grok/test.yaml
@@ -0,0 +1,29 @@
+#these are the events we feed into the parser
+lines:
+  - Line:
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE1 trailing stuff
+  - Line:
+      #second sample line, same format as the first
+      Labels:
+        #this one will be checked by a filter
+        type: testlog
+      Raw: xxheader VALUE2 trailing stuff
+#these are the results we expect from the parser
+results:
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE1
+      test_bis: lolilol
+    Process: true
+    Stage: s01-raw
+  #because of how our second-stage parser is written, this event won't pass its stage
+  - Meta:
+      log_type: parsed_testlog
+    Parsed:
+      extracted_value: VALUE2
+    Process: false
+    Stage: s01-raw
diff --git a/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml b/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml
new file mode 100644
index 0000000..188b439
--- /dev/null
+++ b/pkg/parser/tests/reverse-dns-enrich/base-grok.yaml
@@ -0,0 +1,8 @@
+#filter: "evt.Overflow.Labels.remediation == 'true'"
+name: tests/rdns
+description: "Look up the DNS name associated with the source IP (only for overflows)"
+statics:
+  - method: reverse_dns
+    expression: evt.Enriched.IpToResolve
+  - meta: did_dns_succeeded
+    expression: 'evt.Enriched.reverse_dns == "" ? "no" : "yes"'
diff --git a/pkg/parser/tests/reverse-dns-enrich/parsers.yaml b/pkg/parser/tests/reverse-dns-enrich/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/reverse-dns-enrich/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/reverse-dns-enrich/test.yaml b/pkg/parser/tests/reverse-dns-enrich/test.yaml
new file mode 100644
index 0000000..1495d3f
--- /dev/null
+++ b/pkg/parser/tests/reverse-dns-enrich/test.yaml
@@ -0,0 +1,21 @@
+#these are the events we feed into the parser
+lines:
+  - Enriched:
+      IpToResolve: 8.8.8.8
+  - Enriched:
+      IpToResolve: 1.2.3.4
+#these are the results we expect from the parser
+results:
+  - Enriched:
+      reverse_dns: dns.google.
+      IpToResolve: 8.8.8.8
+    Meta:
+      did_dns_succeeded: yes
+    Process: true
+    Stage: s00-raw
+  - Enriched:
+      IpToResolve: 1.2.3.4
+    Meta:
+      did_dns_succeeded: no
+    Process: true
+    Stage: s00-raw
diff --git a/pkg/parser/tests/sample_strings.txt b/pkg/parser/tests/sample_strings.txt
new file mode 100644
index 0000000..f386f89
--- /dev/null
+++ b/pkg/parser/tests/sample_strings.txt
@@ -0,0 +1,3 @@
+VALUE1
+VALUE3
+RATATA
diff --git a/pkg/parser/tests/whitelist-base/base-grok.yaml b/pkg/parser/tests/whitelist-base/base-grok.yaml
new file mode 100644
index 0000000..44cbd10
--- /dev/null
+++ b/pkg/parser/tests/whitelist-base/base-grok.yaml
@@ -0,0 +1,14 @@
+name: test/whitelists
+description: "Whitelist tests"
+debug: true
+whitelist:
+  reason: "Whitelist tests"
+  ip:
+    - 8.8.8.8
+  cidr:
+    - "1.2.3.0/24"
+  expression:
+    - "'supertoken1234' == evt.Enriched.test_token"
+statics:
+  - meta: statics
+    value: success
diff --git a/pkg/parser/tests/whitelist-base/parsers.yaml b/pkg/parser/tests/whitelist-base/parsers.yaml
new file mode 100644
index 0000000..775f889
--- /dev/null
+++ b/pkg/parser/tests/whitelist-base/parsers.yaml
@@ -0,0 +1,2 @@
+ - filename: {{.TestDirectory}}/base-grok.yaml
+   stage: s00-raw
diff --git a/pkg/parser/tests/whitelist-base/test.yaml b/pkg/parser/tests/whitelist-base/test.yaml
new file mode 100644
index 0000000..4524e95
--- /dev/null
+++ b/pkg/parser/tests/whitelist-base/test.yaml
@@ -0,0 +1,53 @@
+#these are the events we feed into the parser
+lines:
+  - Meta:
+      test: test1
+      source_ip: 8.8.8.8
+      statics: toto
+  - Meta:
+      test: test2
+      source_ip: 1.2.3.4
+      statics: toto
+  - Meta:
+      test: test3
+      source_ip: 2.2.3.4
+      statics: toto
+  - Meta:
+      test: test4
+      source_ip: 8.8.8.9
+      statics: toto
+  - Enriched:
+      test_token: supertoken1234
+    Meta:
+      test: test5
+      statics: toto
+#these are the results we expect from the parser
+results:
+  - Whitelisted: true
+    Process: true
+    Meta:
+      test: test1
+      statics: success
+  - Whitelisted: true
+    Process: true
+    Meta:
+      test: test2
+      statics: success
+  - Whitelisted: false
+    Process: true
+    Meta:
+      test: test3
+      statics: toto
+  - Whitelisted: false
+    Process: true
+    Meta:
+      test: test4
+      statics: toto
+  - Whitelisted: true
+    Process: true
+    Meta:
+      test: test5
+      statics: success
+
+
+
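A self-contained sketch of the membership checks a whitelist node performs for its `ip` and `cidr` entries (the `expression` entries are compiled and evaluated through expr instead; the parsed fields live on the `Whitelist` struct in `pkg/parser/whitelist.go` below):

```go
package main

import (
	"fmt"
	"net"
)

// whitelisted mirrors the ip/cidr checks of a whitelist node: an event is
// whitelisted when its source IP matches a listed IP or falls in a listed CIDR.
func whitelisted(src string, ips []string, cidrs []string) bool {
	ip := net.ParseIP(src)
	for _, w := range ips {
		if net.ParseIP(w).Equal(ip) {
			return true
		}
	}
	for _, c := range cidrs {
		if _, ipnet, err := net.ParseCIDR(c); err == nil && ipnet.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	ips, cidrs := []string{"8.8.8.8"}, []string{"1.2.3.0/24"}
	fmt.Println(whitelisted("8.8.8.8", ips, cidrs)) // true  (ip match)
	fmt.Println(whitelisted("1.2.3.4", ips, cidrs)) // true  (cidr match)
	fmt.Println(whitelisted("2.2.3.4", ips, cidrs)) // false
}
```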
diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go
new file mode 100644
index 0000000..6707769
--- /dev/null
+++ b/pkg/parser/unix_parser.go
@@ -0,0 +1,109 @@
+package parser
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
+
+	"github.com/crowdsecurity/grokky"
+	log "github.com/sirupsen/logrus"
+)
+
+type UnixParserCtx struct {
+	Grok       grokky.Host
+	Stages     []string
+	Profiling  bool
+	DataFolder string
+}
+
+type Parsers struct {
+	Ctx             *UnixParserCtx
+	Povfwctx        *UnixParserCtx
+	StageFiles      []Stagefile
+	PovfwStageFiles []Stagefile
+	Nodes           []Node
+	Povfwnodes      []Node
+	EnricherCtx     EnricherCtx
+}
+
+func Init(c map[string]interface{}) (*UnixParserCtx, error) {
+	r := UnixParserCtx{}
+	r.Grok = grokky.NewBase()
+	files, err := os.ReadDir(c["patterns"].(string))
+	if err != nil {
+		return nil, err
+	}
+	r.DataFolder = c["data"].(string)
+	for _, f := range files {
+		if strings.Contains(f.Name(), ".") {
+			continue
+		}
+		if err := r.Grok.AddFromFile(path.Join(c["patterns"].(string), f.Name())); err != nil {
+			log.Errorf("failed to load pattern %s: %v", f.Name(), err)
+			return nil, err
+		}
+	}
+	log.Debugf("Loaded %d pattern files", len(files))
+	return &r, nil
+}
+
+func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) {
+	var err error
+
+	patternsDir := path.Join(cConfig.Crowdsec.ConfigDir, "patterns/")
+	log.Infof("Loading grok library %s", patternsDir)
+	/* load base regexps for the two grok parsers */
+	parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir,
+		"data": cConfig.Crowdsec.DataDir})
+	if err != nil {
+		return parsers, fmt.Errorf("failed to load parser patterns: %v", err)
+	}
+	parsers.Povfwctx, err = Init(map[string]interface{}{"patterns": patternsDir,
+		"data": cConfig.Crowdsec.DataDir})
+	if err != nil {
+		return parsers, fmt.Errorf("failed to load postoverflow parser patterns: %v", err)
+	}
+
+	/*
+		Load enrichers
+	*/
+	log.Infof("Loading enrich plugins")
+
+	parsers.EnricherCtx, err = Loadplugin(cConfig.Crowdsec.DataDir)
+	if err != nil {
+		return parsers, fmt.Errorf("failed to load enrich plugin: %v", err)
+	}
+
+	/*
+		Load the actual parsers
+	*/
+
+	log.Infof("Loading parsers from %d files", len(parsers.StageFiles))
+
+	parsers.Nodes, err = LoadStages(parsers.StageFiles, parsers.Ctx, parsers.EnricherCtx)
+	if err != nil {
+		return parsers, fmt.Errorf("failed to load parser config: %v", err)
+	}
+
+	if len(parsers.PovfwStageFiles) > 0 {
+		log.Infof("Loading postoverflow parsers")
+		parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.Povfwctx, parsers.EnricherCtx)
+	} else {
+		parsers.Povfwnodes = []Node{}
+		log.Infof("No postoverflow parsers to load")
+	}
+
+	if err != nil {
+		return parsers, fmt.Errorf("failed to load postoverflow config: %v", err)
+	}
+
+	if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled {
+		parsers.Ctx.Profiling = true
+		parsers.Povfwctx.Profiling = true
+	}
+
+	return parsers, nil
+}
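A hedged sketch of how a caller wires this file's pieces together, mirroring `LoadParsers` above; the config is assumed to be loaded elsewhere via `csconfig`, and the stage file path is hypothetical:

```go
package parserexample

import (
	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
	"github.com/crowdsecurity/crowdsec/pkg/parser"
	log "github.com/sirupsen/logrus"
)

// setupParsers builds a Parsers value, points it at one stage file, and lets
// LoadParsers resolve patterns, enrichers and nodes in one call.
func setupParsers(cfg *csconfig.Config) (*parser.Parsers, error) {
	p := &parser.Parsers{
		StageFiles: []parser.Stagefile{
			{Filename: "/etc/crowdsec/parsers/s00-raw/example.yaml", Stage: "s00-raw"},
		},
	}
	p, err := parser.LoadParsers(cfg, p)
	if err != nil {
		return nil, err
	}
	log.Infof("%d parser nodes ready (stages: %v)", len(p.Nodes), p.Ctx.Stages)
	return p, nil
}
```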
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers" +) + +type Whitelist struct { + Reason string `yaml:"reason,omitempty"` + Ips []string `yaml:"ip,omitempty"` + B_Ips []net.IP + Cidrs []string `yaml:"cidr,omitempty"` + B_Cidrs []*net.IPNet + Exprs []string `yaml:"expression,omitempty"` + B_Exprs []*ExprWhitelist +} + +type ExprWhitelist struct { + Filter *vm.Program + ExprDebugger *exprhelpers.ExprDebugger // used to debug expression by printing the content of each variable of the expression +} diff --git a/pkg/protobufs/README.md b/pkg/protobufs/README.md new file mode 100644 index 0000000..5156d9a --- /dev/null +++ b/pkg/protobufs/README.md @@ -0,0 +1,8 @@ +To generate go code for the `notifier.proto` files, run : + +``` +protoc --go_out=. --go_opt=paths=source_relative \ + --go-grpc_out=. --go-grpc_opt=paths=source_relative \ + proto/alert.proto` +``` + diff --git a/pkg/protobufs/notifier.pb.go b/pkg/protobufs/notifier.pb.go new file mode 100644 index 0000000..b5dc811 --- /dev/null +++ b/pkg/protobufs/notifier.pb.go @@ -0,0 +1,395 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: notifier.proto + +package protobufs + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Notification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Notification) Reset() { + *x = Notification{} + if protoimpl.UnsafeEnabled { + mi := &file_notifier_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Notification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Notification) ProtoMessage() {} + +func (x *Notification) ProtoReflect() protoreflect.Message { + mi := &file_notifier_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Notification.ProtoReflect.Descriptor instead. 
+func (*Notification) Descriptor() ([]byte, []int) { + return file_notifier_proto_rawDescGZIP(), []int{0} +} + +func (x *Notification) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *Notification) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Config struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config []byte `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *Config) Reset() { + *x = Config{} + if protoimpl.UnsafeEnabled { + mi := &file_notifier_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Config) ProtoMessage() {} + +func (x *Config) ProtoReflect() protoreflect.Message { + mi := &file_notifier_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Config.ProtoReflect.Descriptor instead. +func (*Config) Descriptor() ([]byte, []int) { + return file_notifier_proto_rawDescGZIP(), []int{1} +} + +func (x *Config) GetConfig() []byte { + if x != nil { + return x.Config + } + return nil +} + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_notifier_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_notifier_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_notifier_proto_rawDescGZIP(), []int{2} +} + +var File_notifier_proto protoreflect.FileDescriptor + +var file_notifier_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x36, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x20, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x61, 0x0a, 0x08, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x12, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x28, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x12, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0d, 0x5a, + 0x0b, 0x2e, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_notifier_proto_rawDescOnce sync.Once + file_notifier_proto_rawDescData = file_notifier_proto_rawDesc +) + +func file_notifier_proto_rawDescGZIP() []byte { + file_notifier_proto_rawDescOnce.Do(func() { + file_notifier_proto_rawDescData = protoimpl.X.CompressGZIP(file_notifier_proto_rawDescData) + }) + return file_notifier_proto_rawDescData +} + +var file_notifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_notifier_proto_goTypes = []interface{}{ + (*Notification)(nil), // 0: proto.Notification + (*Config)(nil), // 1: proto.Config + (*Empty)(nil), // 2: proto.Empty +} +var file_notifier_proto_depIdxs = []int32{ + 0, // 0: proto.Notifier.Notify:input_type -> proto.Notification + 1, // 1: proto.Notifier.Configure:input_type -> proto.Config + 2, // 2: proto.Notifier.Notify:output_type -> proto.Empty + 2, // 3: proto.Notifier.Configure:output_type -> proto.Empty + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_notifier_proto_init() } +func file_notifier_proto_init() { + if File_notifier_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_notifier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Notification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_notifier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Config); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_notifier_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_notifier_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_notifier_proto_goTypes, + DependencyIndexes: file_notifier_proto_depIdxs, + MessageInfos: file_notifier_proto_msgTypes, + }.Build() + File_notifier_proto = out.File + file_notifier_proto_rawDesc = nil + file_notifier_proto_goTypes = nil + file_notifier_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NotifierClient is the client API for Notifier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotifierClient interface { + Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) + Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) +} + +type notifierClient struct { + cc grpc.ClientConnInterface +} + +func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { + return ¬ifierClient{cc} +} + +func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/proto.Notifier/Notify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/proto.Notifier/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotifierServer is the server API for Notifier service. +type NotifierServer interface { + Notify(context.Context, *Notification) (*Empty, error) + Configure(context.Context, *Config) (*Empty, error) +} + +// UnimplementedNotifierServer can be embedded to have forward compatible implementations. 
+type UnimplementedNotifierServer struct { +} + +func (*UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") +} +func (*UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} + +func RegisterNotifierServer(s *grpc.Server, srv NotifierServer) { + s.RegisterService(&_Notifier_serviceDesc, srv) +} + +func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Notification) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Notify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Notifier/Notify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Notify(ctx, req.(*Notification)) + } + return interceptor(ctx, in, info, handler) +} + +func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Config) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Notifier/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Configure(ctx, req.(*Config)) + } + return interceptor(ctx, in, info, handler) +} + +var _Notifier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Notifier", + HandlerType: (*NotifierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Notify", + Handler: _Notifier_Notify_Handler, + }, + { + MethodName: "Configure", + Handler: _Notifier_Configure_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "notifier.proto", +} diff --git a/pkg/protobufs/notifier.proto b/pkg/protobufs/notifier.proto new file mode 100644 index 0000000..b1c98a1 --- /dev/null +++ b/pkg/protobufs/notifier.proto @@ -0,0 +1,19 @@ +syntax = "proto3" ; +package proto; +option go_package = ".;protobufs"; + +message Notification { + string text = 1 ; + string name = 2 ; +} + +message Config { + bytes config = 2 ; +} + +message Empty {} + +service Notifier { + rpc Notify(Notification) returns (Empty); + rpc Configure(Config) returns (Empty); +} \ No newline at end of file diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go new file mode 100644 index 0000000..fc89b2f --- /dev/null +++ b/pkg/protobufs/plugin_interface.go @@ -0,0 +1,47 @@ +package protobufs + +import ( + "context" + + plugin "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" +) + +type Notifier interface { + Notify(ctx context.Context, notification *Notification) (*Empty, error) + Configure(ctx context.Context, config *Config) (*Empty, error) +} + +// This is the implementation of plugin.NotifierPlugin so we can serve/consume this. +type NotifierPlugin struct { + // GRPCPlugin must still implement the Plugin interface + plugin.Plugin + // Concrete implementation, written in Go. This is only used for plugins + // that are written in Go. 
+	Impl Notifier
+}
+
+type GRPCClient struct{ client NotifierClient }
+
+func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) {
+	_, err := m.client.Notify(ctx, notification)
+	return &Empty{}, err
+}
+
+func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) {
+	_, err := m.client.Configure(ctx, config)
+	return &Empty{}, err
+}
+
+type GRPCServer struct {
+	Impl Notifier
+}
+
+func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	RegisterNotifierServer(s, p.Impl)
+	return nil
+}
+
+func (p *NotifierPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &GRPCClient{client: NewNotifierClient(c)}, nil
+}
diff --git a/pkg/time/AUTHORS b/pkg/time/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/pkg/time/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/pkg/time/CONTRIBUTING.md b/pkg/time/CONTRIBUTING.md
new file mode 100644
index 0000000..d0485e8
--- /dev/null
+++ b/pkg/time/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
diff --git a/pkg/time/CONTRIBUTORS b/pkg/time/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/pkg/time/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/pkg/time/LICENSE b/pkg/time/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/pkg/time/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/time/PATENTS b/pkg/time/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/pkg/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/pkg/time/README.md b/pkg/time/README.md new file mode 100644 index 0000000..ce9becd --- /dev/null +++ b/pkg/time/README.md @@ -0,0 +1,17 @@ +# Go Time + +This repository provides supplementary Go time packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/time`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/time`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the time repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/time:" in the +subject line, so it is easy to find. diff --git a/pkg/time/rate/rate.go b/pkg/time/rate/rate.go new file mode 100644 index 0000000..fdc7e19 --- /dev/null +++ b/pkg/time/rate/rate.go @@ -0,0 +1,476 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. 
+// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +type RateLimiter interface { + Allow() bool + AllowN(time.Time, int) bool + GetTokensCount() float64 + GetTokensCountAt(time.Time) float64 + Dump() Lstate + Load(Lstate) +} + +type Lstate struct { + Limit Limit + Burst int + Tokens float64 + Last time.Time + LastEvent time.Time +} + +func (lim *Limiter) Dump() Lstate { + st := Lstate{} + st.Limit = lim.limit + st.Burst = lim.burst + st.Tokens = lim.tokens + st.Last = lim.last + st.LastEvent = lim.lastEvent + return st +} + +func (lim *Limiter) Load(st Lstate) { + lim.limit = st.Limit + lim.burst = st.Burst + lim.tokens = st.Tokens + lim.last = st.Last + lim.lastEvent = st.LastEvent +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. 
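A minimal, self-contained sketch of the three call styles described above (Allow, Reserve, Wait). The import path is an assumption based on this tree's layout (`pkg/time/rate`); everything else uses only the API defined in this file:

```go
package main

import (
	"context"
	"fmt"
	"time"

	// import path assumed from this patch's layout (pkg/time/rate)
	"github.com/crowdsecurity/crowdsec/pkg/time/rate"
)

func main() {
	// 2 events per second, burst of 1: the bucket holds at most one token.
	lim := rate.NewLimiter(rate.Limit(2), 1)

	fmt.Println(lim.Allow()) // true: the bucket starts full
	fmt.Println(lim.Allow()) // false: no token yet; Allow never blocks

	// Reserve always returns; the caller is told how long to wait.
	r := lim.Reserve()
	if r.OK() {
		time.Sleep(r.Delay())
		// ... act here ...
	}

	// Wait blocks until a token is available or the context is done.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("rate limited:", err)
	}
}
```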
+func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. 
+// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) + defer t.Stop() + select { + case <-t.C: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. 
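+	// (the token count may go negative here; that deficit is what produces the wait duration computed below)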
+	tokens -= float64(n)
+
+	// Calculate the wait duration
+	var waitDuration time.Duration
+	if tokens < 0 {
+		waitDuration = lim.limit.durationFromTokens(-tokens)
+	}
+
+	// Decide result
+	ok := n <= lim.burst && waitDuration <= maxFutureReserve
+
+	// Prepare reservation
+	r := Reservation{
+		ok:    ok,
+		lim:   lim,
+		limit: lim.limit,
+	}
+	if ok {
+		r.tokens = n
+		r.timeToAct = now.Add(waitDuration)
+	}
+
+	// Update state
+	if ok {
+		lim.last = now
+		lim.tokens = tokens
+		lim.lastEvent = r.timeToAct
+	} else {
+		lim.last = last
+	}
+
+	lim.mu.Unlock()
+	return r
+}
+
+// advance calculates and returns an updated state for lim resulting from the passage of time.
+// lim is not changed.
+func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
+	last := lim.last
+	if now.Before(last) {
+		last = now
+	}
+
+	// Avoid making delta overflow below when last is very old.
+	maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
+	elapsed := now.Sub(last)
+	if elapsed > maxElapsed {
+		elapsed = maxElapsed
+	}
+
+	// Calculate the new number of tokens, due to time that passed.
+	delta := lim.limit.tokensFromDuration(elapsed)
+	tokens := lim.tokens + delta
+	if burst := float64(lim.burst); tokens > burst {
+		tokens = burst
+	}
+
+	return now, last, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+	seconds := tokens / float64(limit)
+	return time.Nanosecond * time.Duration(1e9*seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+	// Split the integer and fractional parts ourself to minimize rounding errors.
+	// See golang.org/issues/34861.
+	sec := float64(d/time.Second) * float64(limit)
+	nsec := float64(d%time.Second) * float64(limit)
+	return sec + nsec/1e9
+}
+
+// GetTokensCount returns the number of tokens available in the bucket.
+func (lim *Limiter) GetTokensCount() float64 {
+	_, _, tokens := lim.advance(time.Now())
+	return tokens
+}
+
+// GetTokensCountAt returns the number of tokens available in the bucket at time t.
+func (lim *Limiter) GetTokensCountAt(t time.Time) float64 {
+	_, _, tokens := lim.advance(t)
+	return tokens
+}
+
+// AlwaysFull is a rate limiter that never limits anything;
+// it satisfies the RateLimiter interface defined above.
+type AlwaysFull struct {
+}
+
+func (af *AlwaysFull) Dump() Lstate {
+	return Lstate{}
+}
+
+func (af *AlwaysFull) Load(st Lstate) {
+	return
+}
+
+func (af *AlwaysFull) Allow() bool {
+	return true
+}
+
+func (af *AlwaysFull) AllowN(time.Time, int) bool {
+	return true
+}
+
+func (af *AlwaysFull) GetTokensCount() float64 {
+	return float64(int(^uint(0) >> 1))
+}
+
+func (af *AlwaysFull) GetTokensCountAt(t time.Time) float64 {
+	return float64(int(^uint(0) >> 1))
+}
diff --git a/pkg/time/rate/rate_test.go b/pkg/time/rate/rate_test.go
new file mode 100644
index 0000000..6df9412
--- /dev/null
+++ b/pkg/time/rate/rate_test.go
@@ -0,0 +1,483 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
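The Dump/Load pair and the RateLimiter interface above appear to be local additions relative to upstream x/time/rate; they let the bucket's internal state be snapshotted and restored, presumably so it can survive a restart. A short sketch of that round-trip, reusing the import path assumed earlier:

```go
package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/time/rate" // assumed import path
)

func main() {
	lim := rate.NewLimiter(10, 5)
	_ = lim.Allow() // consume one token

	// Dump copies the five state fields into a plain Lstate value. Note
	// that it reads them without taking the limiter's mutex, so avoid
	// calling it while other goroutines are using lim.
	st := lim.Dump()
	fmt.Printf("saved: %+v\n", st)

	// The zero-value Limiter is valid; Load overwrites all of its state,
	// so `restored` resumes exactly where lim left off.
	restored := &rate.Limiter{}
	restored.Load(st)
	fmt.Println(restored.Limit(), restored.Burst())
}
```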
+ +//go:build go1.7 +// +build go1.7 + +package rate + +import ( + "context" + "math" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestLimit(t *testing.T) { + if Limit(10) == Inf { + t.Errorf("Limit(10) == Inf should be false") + } +} + +func closeEnough(a, b Limit) bool { + return (math.Abs(float64(a)/float64(b)) - 1.0) < 1e-9 +} + +func TestEvery(t *testing.T) { + cases := []struct { + interval time.Duration + lim Limit + }{ + {0, Inf}, + {-1, Inf}, + {1 * time.Nanosecond, Limit(1e9)}, + {1 * time.Microsecond, Limit(1e6)}, + {1 * time.Millisecond, Limit(1e3)}, + {10 * time.Millisecond, Limit(100)}, + {100 * time.Millisecond, Limit(10)}, + {1 * time.Second, Limit(1)}, + {2 * time.Second, Limit(0.5)}, + {time.Duration(2.5 * float64(time.Second)), Limit(0.4)}, + {4 * time.Second, Limit(0.25)}, + {10 * time.Second, Limit(0.1)}, + {time.Duration(math.MaxInt64), Limit(1e9 / float64(math.MaxInt64))}, + } + for _, tc := range cases { + lim := Every(tc.interval) + if !closeEnough(lim, tc.lim) { + t.Errorf("Every(%v) = %v want %v", tc.interval, lim, tc.lim) + } + } +} + +const ( + d = 100 * time.Millisecond +) + +var ( + t0 = time.Now() + t1 = t0.Add(time.Duration(1) * d) + t2 = t0.Add(time.Duration(2) * d) + t3 = t0.Add(time.Duration(3) * d) + t4 = t0.Add(time.Duration(4) * d) + t5 = t0.Add(time.Duration(5) * d) + t9 = t0.Add(time.Duration(9) * d) +) + +type allow struct { + t time.Time + n int + ok bool +} + +func run(t *testing.T, lim *Limiter, allows []allow) { + for i, allow := range allows { + ok := lim.AllowN(allow.t, allow.n) + if ok != allow.ok { + t.Errorf("step %d: lim.AllowN(%v, %v) = %v want %v", + i, allow.t, allow.n, ok, allow.ok) + } + } +} + +func TestLimiterBurst1(t *testing.T) { + run(t, NewLimiter(10, 1), []allow{ + {t0, 1, true}, + {t0, 1, false}, + {t0, 1, false}, + {t1, 1, true}, + {t1, 1, false}, + {t1, 1, false}, + {t2, 2, false}, // burst size is 1, so n=2 always fails + {t2, 1, true}, + {t2, 1, false}, + }) +} + +func TestLimiterBurst3(t *testing.T) { + run(t, NewLimiter(10, 3), []allow{ + {t0, 2, true}, + {t0, 2, false}, + {t0, 1, true}, + {t0, 1, false}, + {t1, 4, false}, + {t2, 1, true}, + {t3, 1, true}, + {t4, 1, true}, + {t4, 1, true}, + {t4, 1, false}, + {t4, 1, false}, + {t9, 3, true}, + {t9, 0, true}, + }) +} + +func TestLimiterJumpBackwards(t *testing.T) { + run(t, NewLimiter(10, 3), []allow{ + {t1, 1, true}, // start at t1 + {t0, 1, true}, // jump back to t0, two tokens remain + {t0, 1, true}, + {t0, 1, false}, + {t0, 1, false}, + {t1, 1, true}, // got a token + {t1, 1, false}, + {t1, 1, false}, + {t2, 1, true}, // got another token + {t2, 1, false}, + {t2, 1, false}, + }) +} + +// Ensure that tokensFromDuration doesn't produce +// rounding errors by truncating nanoseconds. +// See golang.org/issues/34861. +func TestLimiter_noTruncationErrors(t *testing.T) { + if !NewLimiter(0.7692307692307693, 1).Allow() { + t.Fatal("expected true") + } +} + +func TestSimultaneousRequests(t *testing.T) { + const ( + limit = 1 + burst = 5 + numRequests = 15 + ) + var ( + wg sync.WaitGroup + numOK = uint32(0) + ) + + // Very slow replenishing bucket. + lim := NewLimiter(limit, burst) + + // Tries to take a token, atomically updates the counter and decreases the wait + // group counter. 
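+	// Only the initial burst of tokens can be claimed during the test's short run
+	// (the limit is 1 token/s), so exactly `burst` of the goroutines should succeed.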
+ f := func() { + defer wg.Done() + if ok := lim.Allow(); ok { + atomic.AddUint32(&numOK, 1) + } + } + + wg.Add(numRequests) + for i := 0; i < numRequests; i++ { + go f() + } + wg.Wait() + if numOK != burst { + t.Errorf("numOK = %d, want %d", numOK, burst) + } +} + +func TestLongRunningQPS(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + if runtime.GOOS == "openbsd" { + t.Skip("low resolution time.Sleep invalidates test (golang.org/issue/14183)") + return + } + if runtime.GOOS == "windows" { + t.Skip("test is unreliable on windows") + } + + // The test runs for a few seconds executing many requests and then checks + // that overall number of requests is reasonable. + const ( + limit = 100 + burst = 100 + ) + var numOK = int32(0) + + lim := NewLimiter(limit, burst) + + var wg sync.WaitGroup + f := func() { + if ok := lim.Allow(); ok { + atomic.AddInt32(&numOK, 1) + } + wg.Done() + } + + start := time.Now() + end := start.Add(5 * time.Second) + for time.Now().Before(end) { + wg.Add(1) + go f() + + // This will still offer ~500 requests per second, but won't consume + // outrageous amount of CPU. + time.Sleep(2 * time.Millisecond) + } + wg.Wait() + elapsed := time.Since(start) + ideal := burst + (limit * float64(elapsed) / float64(time.Second)) + + // We should never get more requests than allowed. + if want := int32(ideal + 1); numOK > want { + t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) + } + // We should get very close to the number of requests allowed. + if want := int32(0.999 * ideal); numOK < want { + t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal) + } +} + +type request struct { + t time.Time + n int + act time.Time + ok bool +} + +// dFromDuration converts a duration to a multiple of the global constant d +func dFromDuration(dur time.Duration) int { + // Adding a millisecond to be swallowed by the integer division + // because we don't care about small inaccuracies + return int((dur + time.Millisecond) / d) +} + +// dSince returns multiples of d since t0 +func dSince(t time.Time) int { + return dFromDuration(t.Sub(t0)) +} + +func runReserve(t *testing.T, lim *Limiter, req request) *Reservation { + return runReserveMax(t, lim, req, InfDuration) +} + +func runReserveMax(t *testing.T, lim *Limiter, req request, maxReserve time.Duration) *Reservation { + r := lim.reserveN(req.t, req.n, maxReserve) + if r.ok && (dSince(r.timeToAct) != dSince(req.act)) || r.ok != req.ok { + t.Errorf("lim.reserveN(t%d, %v, %v) = (t%d, %v) want (t%d, %v)", + dSince(req.t), req.n, maxReserve, dSince(r.timeToAct), r.ok, dSince(req.act), req.ok) + } + return &r +} + +func TestSimpleReserve(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t2, true}) + runReserve(t, lim, request{t3, 2, t4, true}) +} + +func TestMix(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst + runReserve(t, lim, request{t0, 2, t0, true}) + run(t, lim, []allow{{t1, 2, false}}) // not enough tokens - don't allow + runReserve(t, lim, request{t1, 2, t2, true}) + run(t, lim, []allow{{t1, 1, false}}) // negative tokens - don't allow + run(t, lim, []allow{{t3, 1, true}}) +} + +func TestCancelInvalid(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 3, t3, false}) + r.CancelAt(t0) // should have no effect + runReserve(t, lim, request{t0, 2, t2, 
true}) // did not get extra tokens +} + +func TestCancelLast(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + r.CancelAt(t1) // got 2 tokens back + runReserve(t, lim, request{t1, 2, t2, true}) +} + +func TestCancelTooLate(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + r.CancelAt(t3) // too late to cancel - should have no effect + runReserve(t, lim, request{t3, 2, t4, true}) +} + +func TestCancel0Tokens(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 1, t1, true}) + runReserve(t, lim, request{t0, 1, t2, true}) + r.CancelAt(t0) // got 0 tokens back + runReserve(t, lim, request{t0, 1, t3, true}) +} + +func TestCancel1Token(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t2, true}) + runReserve(t, lim, request{t0, 1, t3, true}) + r.CancelAt(t2) // got 1 token back + runReserve(t, lim, request{t2, 2, t4, true}) +} + +func TestCancelMulti(t *testing.T) { + lim := NewLimiter(10, 4) + + runReserve(t, lim, request{t0, 4, t0, true}) + rA := runReserve(t, lim, request{t0, 3, t3, true}) + runReserve(t, lim, request{t0, 1, t4, true}) + rC := runReserve(t, lim, request{t0, 1, t5, true}) + rC.CancelAt(t1) // get 1 token back + rA.CancelAt(t1) // get 2 tokens back, as if C was never reserved + runReserve(t, lim, request{t1, 3, t5, true}) +} + +func TestReserveJumpBack(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 + runReserve(t, lim, request{t0, 1, t1, true}) // should violate Limit,Burst + runReserve(t, lim, request{t2, 2, t3, true}) +} + +func TestReserveJumpBackCancel(t *testing.T) { + lim := NewLimiter(10, 2) + + runReserve(t, lim, request{t1, 2, t1, true}) // start at t1 + r := runReserve(t, lim, request{t1, 2, t3, true}) + runReserve(t, lim, request{t1, 1, t4, true}) + r.CancelAt(t0) // cancel at t0, get 1 token back + runReserve(t, lim, request{t1, 2, t4, true}) // should violate Limit,Burst +} + +func TestReserveSetLimit(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetLimitAt(t2, 10) + runReserve(t, lim, request{t2, 1, t4, true}) // violates Limit and Burst +} + +func TestReserveSetBurst(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetBurstAt(t3, 4) + runReserve(t, lim, request{t0, 4, t9, true}) // violates Limit and Burst +} + +func TestReserveSetLimitCancel(t *testing.T) { + lim := NewLimiter(5, 2) + + runReserve(t, lim, request{t0, 2, t0, true}) + r := runReserve(t, lim, request{t0, 2, t4, true}) + lim.SetLimitAt(t2, 10) + r.CancelAt(t2) // 2 tokens back + runReserve(t, lim, request{t2, 2, t3, true}) +} + +func TestReserveMax(t *testing.T) { + lim := NewLimiter(10, 2) + maxT := d + + runReserveMax(t, lim, request{t0, 2, t0, true}, maxT) + runReserveMax(t, lim, request{t0, 1, t1, true}, maxT) // reserve for close future + runReserveMax(t, lim, request{t0, 1, t2, false}, maxT) // time to act too far in the future +} + +type wait struct { + name string + ctx context.Context + n int + delay int // in multiples of d + nilErr bool +} + +func runWait(t *testing.T, lim *Limiter, w wait) { + 
start := time.Now() // runWait asserts the error outcome (w.nilErr) and checks the measured delay, in multiples of d, against w.delay
+	err := lim.WaitN(w.ctx, w.n)
+	delay := time.Since(start)
+	if (w.nilErr && err != nil) || (!w.nilErr && err == nil) || w.delay != dFromDuration(delay) {
+		errString := "<nil>"
+		if !w.nilErr {
+			errString = "<non-nil>"
+		}
+		t.Errorf("lim.WaitN(%v, lim, %v) = %v with delay %v ; want %v with delay %v",
+			w.name, w.n, err, delay, errString, d*time.Duration(w.delay))
+	}
+}
+
+func TestWaitSimple(t *testing.T) {
+	lim := NewLimiter(10, 3)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	runWait(t, lim, wait{"already-cancelled", ctx, 1, 0, false})
+
+	runWait(t, lim, wait{"exceed-burst-error", context.Background(), 4, 0, false})
+
+	runWait(t, lim, wait{"act-now", context.Background(), 2, 0, true})
+	runWait(t, lim, wait{"act-later", context.Background(), 3, 2, true})
+}
+
+func TestWaitCancel(t *testing.T) {
+	lim := NewLimiter(10, 3)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	runWait(t, lim, wait{"act-now", ctx, 2, 0, true}) // after this lim.tokens = 1
+	go func() {
+		time.Sleep(d)
+		cancel()
+	}()
+	runWait(t, lim, wait{"will-cancel", ctx, 3, 1, false})
+	// should get 3 tokens back, and have lim.tokens = 2
+	t.Logf("tokens:%v last:%v lastEvent:%v", lim.tokens, lim.last, lim.lastEvent)
+	runWait(t, lim, wait{"act-now-after-cancel", context.Background(), 2, 0, true})
+}
+
+func TestWaitTimeout(t *testing.T) {
+	lim := NewLimiter(10, 3)
+
+	ctx, cancel := context.WithTimeout(context.Background(), d)
+	defer cancel()
+	runWait(t, lim, wait{"act-now", ctx, 2, 0, true})
+	runWait(t, lim, wait{"w-timeout-err", ctx, 3, 0, false})
+}
+
+func TestWaitInf(t *testing.T) {
+	lim := NewLimiter(Inf, 0)
+
+	runWait(t, lim, wait{"exceed-burst-no-error", context.Background(), 3, 0, true})
+}
+
+func BenchmarkAllowN(b *testing.B) {
+	lim := NewLimiter(Every(1*time.Second), 1)
+	now := time.Now()
+	b.ReportAllocs()
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			lim.AllowN(now, 1)
+		}
+	})
+}
+
+func BenchmarkWaitNNoDelay(b *testing.B) {
+	lim := NewLimiter(Limit(b.N), b.N)
+	ctx := context.Background()
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if err := lim.WaitN(ctx, 1); err != nil {
+			b.Errorf("failed limiter : %s", err)
+		}
+	}
+}
diff --git a/pkg/types/constants.go b/pkg/types/constants.go
new file mode 100644
index 0000000..3fe83de
--- /dev/null
+++ b/pkg/types/constants.go
@@ -0,0 +1,5 @@
+package types
+
+const ApiKeyAuthType = "api-key"
+const TlsAuthType = "tls"
+const PasswordAuthType = "password"
diff --git a/pkg/types/dataset.go b/pkg/types/dataset.go
new file mode 100644
index 0000000..3074f18
--- /dev/null
+++ b/pkg/types/dataset.go
@@ -0,0 +1,75 @@
+package types
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path"
+
+	log "github.com/sirupsen/logrus"
+)
+
+type DataSource struct {
+	SourceURL string `yaml:"source_url"`
+	DestPath  string `yaml:"dest_file"`
+	Type      string `yaml:"type"`
+}
+
+type DataSet struct {
+	Data []*DataSource `yaml:"data,omitempty"`
+}
+
+func downloadFile(url string, destPath string) error {
+	log.Debugf("downloading %s in %s", url, destPath)
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("download response 'HTTP %d' : %s", resp.StatusCode, string(body))
+	}
+
+	file, err := 
os.OpenFile(destPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	_, err = file.WriteString(string(body))
+	if err != nil {
+		return err
+	}
+
+	err = file.Sync()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func GetData(data []*DataSource, dataDir string) error {
+	for _, dataS := range data {
+		destPath := path.Join(dataDir, dataS.DestPath)
+		log.Infof("downloading data '%s' in '%s'", dataS.SourceURL, destPath)
+		err := downloadFile(dataS.SourceURL, destPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/types/dataset_test.go b/pkg/types/dataset_test.go
new file mode 100644
index 0000000..956e331
--- /dev/null
+++ b/pkg/types/dataset_test.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/jarcoal/httpmock"
+)
+
+func TestDownloadFile(t *testing.T) {
+	examplePath := "./example.txt"
+	defer os.Remove(examplePath)
+
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+	//OK
+	httpmock.RegisterResponder(
+		"GET",
+		"https://example.com/xx",
+		httpmock.NewStringResponder(200, "example content oneoneone"),
+	)
+	httpmock.RegisterResponder(
+		"GET",
+		"https://example.com/x",
+		httpmock.NewStringResponder(404, "not found"),
+	)
+	err := downloadFile("https://example.com/xx", examplePath)
+	assert.NoError(t, err)
+	content, err := os.ReadFile(examplePath)
+	assert.Equal(t, "example content oneoneone", string(content))
+	assert.NoError(t, err)
+	//bad uri
+	err = downloadFile("https://zz.com", examplePath)
+	assert.Error(t, err)
+	//404
+	err = downloadFile("https://example.com/x", examplePath)
+	assert.Error(t, err)
+	//bad target
+	err = downloadFile("https://example.com/xx", "")
+	assert.Error(t, err)
+}
diff --git a/pkg/types/event.go b/pkg/types/event.go
new file mode 100644
index 0000000..c59f4fd
--- /dev/null
+++ b/pkg/types/event.go
@@ -0,0 +1,108 @@
+package types
+
+import (
+	"time"
+
+	log "github.com/sirupsen/logrus"
+
+	"github.com/antonmedv/expr/vm"
+	"github.com/crowdsecurity/crowdsec/pkg/models"
+)
+
+const (
+	LOG = iota
+	OVFLW
+)
+
+//Event is the structure representing a runtime event (log or overflow)
+type Event struct {
+	/* is it a log or an overflow */
+	Type            int    `yaml:"Type,omitempty" json:"Type,omitempty"`                       //Can be types.LOG (0) or types.OVFLW (1)
+	ExpectMode      int    `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"`           //how the buckets should handle the event : leaky.TIMEMACHINE or leaky.LIVE
+	Whitelisted     bool   `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"`
+	WhitelistReason string `yaml:"WhitelistReason,omitempty" json:"whitelist_reason,omitempty"`
+	//should add whitelist reason ?
+ /* the current stage of the line being parsed */ + Stage string `yaml:"Stage,omitempty" json:"Stage,omitempty"` + /* original line (produced by acquisition) */ + Line Line `yaml:"Line,omitempty" json:"Line,omitempty"` + /* output of groks */ + Parsed map[string]string `yaml:"Parsed,omitempty" json:"Parsed,omitempty"` + /* output of enrichment */ + Enriched map[string]string `yaml:"Enriched,omitempty" json:"Enriched,omitempty"` + /* Overflow */ + Overflow RuntimeAlert `yaml:"Overflow,omitempty" json:"Alert,omitempty"` + Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` //parsed time `json:"-"` `` + StrTime string `yaml:"StrTime,omitempty" json:"StrTime,omitempty"` + StrTimeFormat string `yaml:"StrTimeFormat,omitempty" json:"StrTimeFormat,omitempty"` + MarshaledTime string `yaml:"MarshaledTime,omitempty" json:"MarshaledTime,omitempty"` + Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` //can be set to false to avoid processing line + /* Meta is the only part that will make it to the API - it should be normalized */ + Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` +} + +func (e *Event) GetType() string { + if e.Type == OVFLW { + return "overflow" + } else if e.Type == LOG { + return "log" + } else { + log.Warningf("unknown event type for %+v", e) + return "unknown" + } +} + +func (e *Event) GetMeta(key string) string { + if e.Type == OVFLW { + for _, alert := range e.Overflow.APIAlerts { + for _, event := range alert.Events { + if event.GetMeta(key) != "" { + return event.GetMeta(key) + } + } + } + } else if e.Type == LOG { + for k, v := range e.Meta { + if k == key { + return v + } + } + } + return "" +} + +//Move in leakybuckets +const ( + Undefined = "" + Ip = "Ip" + Range = "Range" + Filter = "Filter" + Country = "Country" + AS = "AS" +) + +//Move in leakybuckets +type ScopeType struct { + Scope string `yaml:"type"` + Filter string `yaml:"expression"` + RunTimeFilter *vm.Program +} + +type RuntimeAlert struct { + Mapkey string `yaml:"MapKey,omitempty" json:"MapKey,omitempty"` + BucketId string `yaml:"BucketId,omitempty" json:"BucketId,omitempty"` + Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` + Reprocess bool `yaml:"Reprocess,omitempty" json:"Reprocess,omitempty"` + Sources map[string]models.Source `yaml:"Sources,omitempty" json:"Sources,omitempty"` + Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` //this one is a pointer to APIAlerts[0] for convenience. 
+ //APIAlerts will be populated at the end when there is more than one source + APIAlerts []models.Alert `yaml:"APIAlerts,omitempty" json:"APIAlerts,omitempty"` +} + +func (r RuntimeAlert) GetSources() []string { + ret := make([]string, 0) + for key := range r.Sources { + ret = append(ret, key) + } + return ret +} diff --git a/pkg/types/grok_pattern.go b/pkg/types/grok_pattern.go new file mode 100644 index 0000000..53e2765 --- /dev/null +++ b/pkg/types/grok_pattern.go @@ -0,0 +1,41 @@ +package types + +import ( + "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/grokky" +) + +//Used mostly for statics +type ExtraField struct { + //if the target is indicated by name Struct.Field etc, + TargetByName string `yaml:"target,omitempty"` + //if the target field is in Event map + Parsed string `yaml:"parsed,omitempty"` + //if the target field is in Meta map + Meta string `yaml:"meta,omitempty"` + //if the target field is in Enriched map + Enriched string `yaml:"enriched,omitempty"` + //the source is a static value + Value string `yaml:"value,omitempty"` + //or the result of an Expression + ExpValue string `yaml:"expression,omitempty"` + RunTimeValue *vm.Program `json:"-"` //the actual compiled filter + //or an enrichment method + Method string `yaml:"method,omitempty"` +} + +type GrokPattern struct { + //the field to which regexp is going to apply + TargetField string `yaml:"apply_on,omitempty"` + //the grok/regexp by name (loaded from patterns/*) + RegexpName string `yaml:"name,omitempty"` + //a proper grok pattern + RegexpValue string `yaml:"pattern,omitempty"` + //the runtime form of regexpname / regexpvalue + RunTimeRegexp *grokky.Pattern `json:"-"` //the actual regexp + //the output of the expression is going to be the source for regexp + ExpValue string `yaml:"expression,omitempty"` + RunTimeValue *vm.Program `json:"-"` //the actual compiled filter + //a grok can contain statics that apply if pattern is successful + Statics []ExtraField `yaml:"statics,omitempty"` +} diff --git a/pkg/types/ip.go b/pkg/types/ip.go new file mode 100644 index 0000000..647fb4a --- /dev/null +++ b/pkg/types/ip.go @@ -0,0 +1,108 @@ +package types + +import ( + "encoding/binary" + "fmt" + "math" + "net" + "strings" + + "github.com/pkg/errors" +) + +func LastAddress(n net.IPNet) net.IP { + ip := n.IP.To4() + if ip == nil { + ip = n.IP + return net.IP{ + ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], + ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], + ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], + ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], + ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], + ip[15] | ^n.Mask[15]} + } + + return net.IPv4( + ip[0]|^n.Mask[0], + ip[1]|^n.Mask[1], + ip[2]|^n.Mask[2], + ip[3]|^n.Mask[3]) +} + +/*returns a range for any ip or range*/ +func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { + if strings.Contains(anyIP, "/") { + _, net, err := net.ParseCIDR(anyIP) + if err != nil { + return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing range %s", anyIP) + } + return Range2Ints(*net) + } + + ip := net.ParseIP(anyIP) + if ip == nil { + return -1, 0, 0, 0, 0, fmt.Errorf("invalid address") + } + + sz, start, end, err := IP2Ints(ip) + if err != nil { + return -1, 0, 0, 0, 0, errors.Wrapf(err, "while parsing ip %s", anyIP) + } + + return sz, start, end, start, end, nil +} + +/*size (16|4), nw_start, suffix_start, nw_end, suffix_end, error*/ +func Range2Ints(network net.IPNet) (int, int64, int64, 
int64, int64, error) { + + szStart, nwStart, sfxStart, err := IP2Ints(network.IP) + if err != nil { + return -1, 0, 0, 0, 0, errors.Wrap(err, "converting first ip in range") + } + lastAddr := LastAddress(network) + szEnd, nwEnd, sfxEnd, err := IP2Ints(lastAddr) + if err != nil { + return -1, 0, 0, 0, 0, errors.Wrap(err, "transforming last address of range") + } + if szEnd != szStart { + return -1, 0, 0, 0, 0, fmt.Errorf("inconsistent size for range first(%d) and last(%d) ip", szStart, szEnd) + } + return szStart, nwStart, sfxStart, nwEnd, sfxEnd, nil +} + +func uint2int(u uint64) int64 { + var ret int64 + if u == math.MaxInt64 { + ret = 0 + } else if u == math.MaxUint64 { + ret = math.MaxInt64 + } else if u > math.MaxInt64 { + u -= math.MaxInt64 + ret = int64(u) + } else { + ret = int64(u) + ret -= math.MaxInt64 + } + return ret +} + +/*size (16|4), network, suffix, error*/ +func IP2Ints(pip net.IP) (int, int64, int64, error) { + var ip_nw, ip_sfx uint64 + + pip4 := pip.To4() + pip16 := pip.To16() + + if pip4 != nil { + ip_nw32 := binary.BigEndian.Uint32(pip4) + + return 4, uint2int(uint64(ip_nw32)), uint2int(ip_sfx), nil + } else if pip16 != nil { + ip_nw = binary.BigEndian.Uint64(pip16[0:8]) + ip_sfx = binary.BigEndian.Uint64(pip16[8:16]) + return 16, uint2int(ip_nw), uint2int(ip_sfx), nil + } else { + return -1, 0, 0, fmt.Errorf("unexpected len %d for %s", len(pip), pip) + } +} diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go new file mode 100644 index 0000000..f8c14b1 --- /dev/null +++ b/pkg/types/ip_test.go @@ -0,0 +1,220 @@ +package types + +import ( + "math" + "net" + "strings" + "testing" +) + +func TestIP2Int(t *testing.T) { + + tEmpty := net.IP{} + _, _, _, err := IP2Ints(tEmpty) + if !strings.Contains(err.Error(), "unexpected len 0 for ") { + t.Fatalf("unexpected: %s", err) + } +} +func TestRange2Int(t *testing.T) { + tEmpty := net.IPNet{} + //empty item + _, _, _, _, _, err := Range2Ints(tEmpty) + if !strings.Contains(err.Error(), "converting first ip in range") { + t.Fatalf("unexpected: %s", err) + } + +} + +func TestAdd2Int(t *testing.T) { + tests := []struct { + in_addr string + exp_sz int + exp_start_ip int64 + exp_start_sfx int64 + exp_end_ip int64 + exp_end_sfx int64 + exp_error string + }{ + { + in_addr: "7FFF:FFFF:FFFF:FFFF:aaaa:aaaa:aaaa:fff7", + + exp_sz: 16, + exp_start_ip: -math.MaxInt64 + 0x7FFFFFFFFFFFFFFF, + exp_start_sfx: -math.MaxInt64 + 0xaaaaaaaaaaaafff7, + exp_end_ip: -math.MaxInt64 + 0x7FFFFFFFFFFFFFFF, + exp_end_sfx: -math.MaxInt64 + 0xaaaaaaaaaaaafff7, + }, + { + in_addr: "aaaa:aaaa:aaaa:aaaa:aaaa:aaaa:aaaa:fff7", + + exp_sz: 16, + exp_start_ip: -math.MaxInt64 + 0xaaaaaaaaaaaaaaaa, + exp_start_sfx: -math.MaxInt64 + 0xaaaaaaaaaaaafff7, + exp_end_ip: -math.MaxInt64 + 0xaaaaaaaaaaaaaaaa, + exp_end_sfx: -math.MaxInt64 + 0xaaaaaaaaaaaafff7, + }, + { + in_addr: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff7", + /*ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff*/ + + exp_sz: 16, + exp_start_ip: math.MaxInt64, + exp_start_sfx: -math.MaxInt64 + 0xfffffffffffffff7, + exp_end_ip: math.MaxInt64, + exp_end_sfx: -math.MaxInt64 + 0xfffffffffffffff7, + }, + { + in_addr: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + /*ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff*/ + + exp_sz: 16, + exp_start_ip: math.MaxInt64, + exp_start_sfx: math.MaxInt64, + exp_end_ip: math.MaxInt64, + exp_end_sfx: math.MaxInt64, + }, + { + in_addr: "::", + /*::*/ + + exp_sz: 16, + exp_start_ip: -math.MaxInt64, + exp_start_sfx: -math.MaxInt64, + exp_end_ip: -math.MaxInt64, + exp_end_sfx: -math.MaxInt64, + }, + { + 
in_addr: "2001:db8::", + /*2001:db8:: -> 2001:db8::*/ + exp_sz: 16, + exp_start_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_start_sfx: -math.MaxInt64, + exp_end_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_end_sfx: -math.MaxInt64, + }, + { + in_addr: "2001:db8:0000:0000:0000:0000:0000:00ff", + /*2001:db8:0000:0000:0000:0000:0000:00ff*/ + exp_sz: 16, + exp_start_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_start_sfx: -math.MaxInt64 + 0xFF, + exp_end_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_end_sfx: -math.MaxInt64 + 0xFF, + }, + { + in_addr: "1.2.3.4", + /*1.2.3.4*/ + exp_sz: 4, + exp_start_ip: -math.MaxInt64 + 0x01020304, + exp_start_sfx: 0, + exp_end_ip: -math.MaxInt64 + 0x01020304, + exp_end_sfx: 0, + }, + { + in_addr: "::/0", + /*:: -> ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff*/ + + exp_sz: 16, + exp_start_ip: -math.MaxInt64, + exp_start_sfx: -math.MaxInt64, + exp_end_ip: math.MaxInt64, + exp_end_sfx: math.MaxInt64, + }, + { + in_addr: "::/64", + /*:: -> 0000:0000:0000:0000:ffff:ffff:ffff:ffff*/ + exp_sz: 16, + exp_start_ip: -math.MaxInt64, + exp_start_sfx: -math.MaxInt64, + exp_end_ip: -math.MaxInt64, + exp_end_sfx: math.MaxInt64, + }, + { + in_addr: "2001:db8::/109", + /*2001:db8:: -> 2001:db8:0000:0000:0000:0000:0007:ffff*/ + exp_sz: 16, + exp_start_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_start_sfx: -math.MaxInt64, + exp_end_ip: -math.MaxInt64 + 0x20010DB800000000, + exp_end_sfx: -math.MaxInt64 + 0x7FFFF, + }, + { + in_addr: "0.0.0.0/0", + /*0.0.0.0 -> 255.255.255.255*/ + exp_sz: 4, + exp_start_ip: -math.MaxInt64, + exp_start_sfx: 0, + exp_end_ip: -math.MaxInt64 + 0xFFFFFFFF, + exp_end_sfx: 0, + }, + { + in_addr: "0.0.0.0/16", + /*0.0.0.0 -> 0.0.255.255*/ + exp_sz: 4, + exp_start_ip: -math.MaxInt64, + exp_start_sfx: 0, + exp_end_ip: -math.MaxInt64 + 0x0000FFFF, + exp_end_sfx: 0, + }, + { + in_addr: "255.255.0.0/16", + /*255.255.0.0 -> 255.255.255.255*/ + exp_sz: 4, + exp_start_ip: -math.MaxInt64 + 0xFFFF0000, + exp_start_sfx: 0, + exp_end_ip: -math.MaxInt64 + 0xFFFFFFFF, + exp_end_sfx: 0, + }, + { + in_addr: "1.2.3.0/24", + /*1.2.3.0 -> 1.2.3.255*/ + exp_sz: 4, + exp_start_ip: -math.MaxInt64 + 0x01020300, + exp_start_sfx: 0, + exp_end_ip: -math.MaxInt64 + 0x010203FF, + exp_end_sfx: 0, + }, + /*errors*/ + { + in_addr: "xxx/24", + exp_error: "invalid CIDR address", + }, + { + in_addr: "xxx2", + exp_error: "invalid address", + }, + } + + for idx, test := range tests { + sz, start_ip, start_sfx, end_ip, end_sfx, err := Addr2Ints(test.in_addr) + if err != nil && test.exp_error == "" { + t.Fatalf("%d unexpected error : %s", idx, err) + } + if test.exp_error != "" { + if !strings.Contains(err.Error(), test.exp_error) { + t.Fatalf("%d unmatched error : %s != %s", idx, err, test.exp_error) + } + continue //we can skip this one + } + if sz != test.exp_sz { + t.Fatalf("%d unexpected size %d != %d", idx, sz, test.exp_sz) + } + if start_ip != test.exp_start_ip { + t.Fatalf("%d unexpected start_ip %d != %d", idx, start_ip, test.exp_start_ip) + } + if sz == 16 { + if start_sfx != test.exp_start_sfx { + t.Fatalf("%d unexpected start sfx %d != %d", idx, start_sfx, test.exp_start_sfx) + } + } + if end_ip != test.exp_end_ip { + t.Fatalf("%d unexpected end ip %d != %d", idx, end_ip, test.exp_end_ip) + } + if sz == 16 { + if end_sfx != test.exp_end_sfx { + t.Fatalf("%d unexpected end sfx %d != %d", idx, end_sfx, test.exp_end_sfx) + } + } + + } +} diff --git a/pkg/types/line.go b/pkg/types/line.go new file mode 100644 index 0000000..f82e17b --- /dev/null +++ b/pkg/types/line.go @@ -0,0 
+1,12 @@ +package types + +import "time" + +type Line struct { + Raw string `yaml:"Raw,omitempty"` + Src string `yaml:"Src,omitempty"` + Time time.Time //acquis time + Labels map[string]string `yaml:"Labels,omitempty"` + Process bool + Module string `yaml:"Module,omitempty"` +} diff --git a/pkg/types/profile.go b/pkg/types/profile.go new file mode 100644 index 0000000..e803421 --- /dev/null +++ b/pkg/types/profile.go @@ -0,0 +1,25 @@ +package types + +import ( + "time" + + "github.com/antonmedv/expr/vm" +) + +/*Action profiles*/ +type RemediationProfile struct { + Apply bool + Ban bool + Slow bool + Captcha bool + Duration string + TimeDuration time.Duration +} +type Profile struct { + Profile string `yaml:"profile"` + Filter string `yaml:"filter"` + Remediation RemediationProfile `yaml:"remediation"` + RunTimeFilter *vm.Program + ApiPush *bool `yaml:"api"` + OutputConfigs []map[string]string `yaml:"outputs,omitempty"` +} diff --git a/pkg/types/utils.go b/pkg/types/utils.go new file mode 100644 index 0000000..342fa63 --- /dev/null +++ b/pkg/types/utils.go @@ -0,0 +1,268 @@ +package types + +import ( + "bufio" + "bytes" + "encoding/gob" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime/debug" + "strconv" + "strings" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/natefinch/lumberjack.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" +) + +var logFormatter log.Formatter +var LogOutput *lumberjack.Logger //io.Writer +var logLevel log.Level + +func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, compress *bool, forceColors bool) error { + + /*Configure logs*/ + if cfgMode == "file" { + _maxsize := 500 + if maxSize != 0 { + _maxsize = maxSize + } + _maxfiles := 3 + if maxFiles != 0 { + _maxfiles = maxFiles + } + _maxage := 28 + if maxAge != 0 { + _maxage = maxAge + } + _compress := true + if compress != nil { + _compress = *compress + } + /*cf. 
https://github.com/natefinch/lumberjack/issues/82 + let's create the file beforehand w/ the right perms */ + fname := cfgFolder + "/crowdsec.log" + // check if file exists + _, err := os.Stat(fname) + // create file if not exists, purposefully ignore errors + if os.IsNotExist(err) { + file, _ := os.OpenFile(fname, os.O_RDWR|os.O_CREATE, 0600) + file.Close() + } + + LogOutput = &lumberjack.Logger{ + Filename: fname, + MaxSize: _maxsize, + MaxBackups: _maxfiles, + MaxAge: _maxage, + Compress: _compress, + } + log.SetOutput(LogOutput) + } else if cfgMode != "stdout" { + return fmt.Errorf("log mode '%s' unknown", cfgMode) + } + logLevel = cfgLevel + log.SetLevel(logLevel) + logFormatter = &log.TextFormatter{TimestampFormat: "02-01-2006 15:04:05", FullTimestamp: true, ForceColors: forceColors} + log.SetFormatter(logFormatter) + return nil +} + +func ConfigureLogger(clog *log.Logger) error { + /*Configure logs*/ + if LogOutput != nil { + clog.SetOutput(LogOutput) + } + + if logFormatter != nil { + clog.SetFormatter(logFormatter) + } + clog.SetLevel(logLevel) + return nil +} + +func Clone(a, b interface{}) error { + + buff := new(bytes.Buffer) + enc := gob.NewEncoder(buff) + dec := gob.NewDecoder(buff) + if err := enc.Encode(a); err != nil { + return fmt.Errorf("failed cloning %T", a) + } + if err := dec.Decode(b); err != nil { + return fmt.Errorf("failed cloning %T", b) + } + return nil +} + +func WriteStackTrace(iErr interface{}) string { + tmpfile, err := os.CreateTemp("", "crowdsec-crash.*.txt") + if err != nil { + log.Fatal(err) + } + if _, err := tmpfile.Write([]byte(fmt.Sprintf("error : %+v\n", iErr))); err != nil { + tmpfile.Close() + log.Fatal(err) + } + if _, err := tmpfile.Write([]byte(cwversion.ShowStr())); err != nil { + tmpfile.Close() + log.Fatal(err) + } + if _, err := tmpfile.Write(debug.Stack()); err != nil { + tmpfile.Close() + log.Fatal(err) + } + if err := tmpfile.Close(); err != nil { + log.Fatal(err) + } + return tmpfile.Name() +} + +//CatchPanic is a util func that we should call from all go-routines to ensure proper stacktrace handling +func CatchPanic(component string) { + if r := recover(); r != nil { + log.Errorf("crowdsec - goroutine %s crashed : %s", component, r) + log.Errorf("please report this error to https://github.com/crowdsecurity/crowdsec/") + filename := WriteStackTrace(r) + log.Errorf("stacktrace/report is written to %s : please join it to your issue", filename) + log.Fatalf("crowdsec stopped") + } +} + +func ParseDuration(d string) (time.Duration, error) { + durationStr := d + if strings.HasSuffix(d, "d") { + days := strings.Split(d, "d")[0] + if len(days) == 0 { + return 0, fmt.Errorf("'%s' can't be parsed as duration", d) + } + daysInt, err := strconv.Atoi(days) + if err != nil { + return 0, err + } + durationStr = strconv.Itoa(daysInt*24) + "h" + } + duration, err := time.ParseDuration(durationStr) + if err != nil { + return 0, err + } + return duration, nil +} + +/*help to copy the file, ioutil doesn't offer the feature*/ + +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +/*copy the file, ioutile doesn't offer the feature*/ +func CopyFile(sourceSymLink, destinationFile string) (err error) { + + sourceFile, err := 
filepath.EvalSymlinks(sourceSymLink) + if err != nil { + log.Infof("Not a symlink : %s", err) + sourceFile = sourceSymLink + } + + sourceFileStat, err := os.Stat(sourceFile) + if err != nil { + return + } + if !sourceFileStat.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) + } + destinationFileStat, err := os.Stat(destinationFile) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(destinationFileStat.Mode().IsRegular()) { + return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) + } + if os.SameFile(sourceFileStat, destinationFileStat) { + return + } + } + if err = os.Link(sourceFile, destinationFile); err != nil { + err = copyFileContents(sourceFile, destinationFile) + } + return +} + +func StrPtr(s string) *string { + return &s +} + +func IntPtr(i int) *int { + return &i +} + +func Int32Ptr(i int32) *int32 { + return &i +} + +func BoolPtr(b bool) *bool { + return &b +} + +func InSlice(str string, slice []string) bool { + for _, item := range slice { + if str == item { + return true + } + } + return false +} + +func UtcNow() time.Time { + return time.Now().UTC() +} + +func GetLineCountForFile(filepath string) int { + f, err := os.Open(filepath) + if err != nil { + log.Fatalf("unable to open log file %s : %s", filepath, err) + } + defer f.Close() + lc := 0 + fs := bufio.NewScanner(f) + for fs.Scan() { + lc++ + } + return lc +} + +// from https://github.com/acarl005/stripansi +var reStripAnsi = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))") + +func StripAnsiString(str string) string { + // the byte version doesn't strip correctly + return reStripAnsi.ReplaceAllString(str, "") +} diff --git a/pkg/yamlpatch/merge.go b/pkg/yamlpatch/merge.go new file mode 100644 index 0000000..8a61b64 --- /dev/null +++ b/pkg/yamlpatch/merge.go @@ -0,0 +1,168 @@ +// +// from https://github.com/uber-go/config/tree/master/internal/merge +// +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package yamlpatch + +import ( + "bytes" + "fmt" + "io" + + "github.com/pkg/errors" + + yaml "gopkg.in/yaml.v2" +) + +type ( + // YAML has three fundamental types. 
When unmarshaled into interface{}, + // they're represented like this. + mapping = map[interface{}]interface{} + sequence = []interface{} +) + +// YAML deep-merges any number of YAML sources, with later sources taking +// priority over earlier ones. +// +// Maps are deep-merged. For example, +// {"one": 1, "two": 2} + {"one": 42, "three": 3} +// == {"one": 42, "two": 2, "three": 3} +// Sequences are replaced. For example, +// {"foo": [1, 2, 3]} + {"foo": [4, 5, 6]} +// == {"foo": [4, 5, 6]} +// +// In non-strict mode, duplicate map keys are allowed within a single source, +// with later values overwriting previous ones. Attempting to merge +// mismatched types (e.g., merging a sequence into a map) replaces the old +// value with the new. +// +// Enabling strict mode returns errors in both of the above cases. +func YAML(sources [][]byte, strict bool) (*bytes.Buffer, error) { + var merged interface{} + var hasContent bool + for _, r := range sources { + d := yaml.NewDecoder(bytes.NewReader(r)) + d.SetStrict(strict) + + var contents interface{} + if err := d.Decode(&contents); err == io.EOF { + // Skip empty and comment-only sources, which we should handle + // differently from explicit nils. + continue + } else if err != nil { + return nil, fmt.Errorf("couldn't decode source: %v", err) + } + + hasContent = true + pair, err := merge(merged, contents, strict) + if err != nil { + return nil, err // error is already descriptive enough + } + merged = pair + } + + buf := &bytes.Buffer{} + if !hasContent { + // No sources had any content. To distinguish this from a source with just + // an explicit top-level null, return an empty buffer. + return buf, nil + } + enc := yaml.NewEncoder(buf) + if err := enc.Encode(merged); err != nil { + return nil, errors.Wrap(err, "couldn't re-serialize merged YAML") + } + return buf, nil +} + +func merge(into, from interface{}, strict bool) (interface{}, error) { + // It's possible to handle this with a mass of reflection, but we only need + // to merge whole YAML files. Since we're always unmarshaling into + // interface{}, we only need to handle a few types. This ends up being + // cleaner if we just handle each case explicitly. + if into == nil { + return from, nil + } + if from == nil { + // Allow higher-priority YAML to explicitly nil out lower-priority entries. + return nil, nil + } + if IsScalar(into) && IsScalar(from) { + return from, nil + } + if IsSequence(into) && IsSequence(from) { + return from, nil + } + if IsMapping(into) && IsMapping(from) { + return mergeMapping(into.(mapping), from.(mapping), strict) + } + // YAML types don't match, so no merge is possible. For backward + // compatibility, ignore mismatches unless we're in strict mode and return + // the higher-priority value. + if !strict { + return from, nil + } + return nil, fmt.Errorf("can't merge a %s into a %s", describe(from), describe(into)) +} + +func mergeMapping(into, from mapping, strict bool) (mapping, error) { + merged := make(mapping, len(into)) + for k, v := range into { + merged[k] = v + } + for k := range from { + m, err := merge(merged[k], from[k], strict) + if err != nil { + return nil, err + } + merged[k] = m + } + return merged, nil +} + +// IsMapping reports whether a type is a mapping in YAML, represented as a +// map[interface{}]interface{}. +func IsMapping(i interface{}) bool { + _, is := i.(mapping) + return is +} + +// IsSequence reports whether a type is a sequence in YAML, represented as an +// []interface{}. 
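A small sketch of the merge semantics documented above (maps deep-merge, sequences are replaced wholesale), assuming this package is imported as `pkg/yamlpatch` from this tree:

```go
package main

import (
	"fmt"

	"github.com/crowdsecurity/crowdsec/pkg/yamlpatch" // import path assumed
)

func main() {
	base := []byte("db: {host: localhost, port: 5432}\ntags: [a, b]\n")
	override := []byte("db: {host: prod.example.com}\ntags: [c]\n")

	// Maps deep-merge (port survives the override), sequences are replaced.
	merged, err := yamlpatch.YAML([][]byte{base, override}, true /* strict */)
	if err != nil {
		panic(err)
	}
	fmt.Print(merged.String())
	// db:
	//   host: prod.example.com
	//   port: 5432
	// tags:
	// - c
}
```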
+func IsSequence(i interface{}) bool { + _, is := i.(sequence) + return is +} + +// IsScalar reports whether a type is a scalar value in YAML. +func IsScalar(i interface{}) bool { + return !IsMapping(i) && !IsSequence(i) +} + +func describe(i interface{}) string { + if IsMapping(i) { + return "mapping" + } + if IsSequence(i) { + return "sequence" + } + return "scalar" +} diff --git a/pkg/yamlpatch/merge_test.go b/pkg/yamlpatch/merge_test.go new file mode 100644 index 0000000..e86f6fe --- /dev/null +++ b/pkg/yamlpatch/merge_test.go @@ -0,0 +1,238 @@ +// Copyright (c) 2018 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package yamlpatch + +import ( + "bytes" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + yaml "gopkg.in/yaml.v2" +) + +func trimcr(s string) string { + return strings.ReplaceAll(s, "\r\n", "\n") +} + +func mustRead(t testing.TB, fname string) []byte { + contents, err := os.ReadFile(fname) + require.NoError(t, err, "failed to read file: %s", fname) + return contents +} + +func dump(t testing.TB, actual, expected string) { + // It's impossible to debug YAML if the actual and expected values are + // printed on a single line. + t.Logf("Actual:\n\n%s\n\n", actual) + t.Logf("Expected:\n\n%s\n\n", expected) +} + +func strip(s string) string { + // It's difficult to write string constants that are valid YAML. Normalize + // strings for ease of testing. 
+ s = strings.TrimSpace(s) + s = strings.Replace(s, "\t", " ", -1) + return s +} + +func canonicalize(t testing.TB, s string) string { + // round-trip to canonicalize formatting + var i interface{} + require.NoError(t, + yaml.Unmarshal([]byte(strip(s)), &i), + "canonicalize: couldn't unmarshal YAML", + ) + formatted, err := yaml.Marshal(i) + require.NoError(t, err, "canonicalize: couldn't marshal YAML") + return string(bytes.TrimSpace(formatted)) +} + +func unmarshal(t testing.TB, s string) interface{} { + var i interface{} + require.NoError(t, yaml.Unmarshal([]byte(strip(s)), &i), "unmarshaling failed") + return i +} + +func succeeds(t testing.TB, strict bool, left, right, expect string) { + l, r := unmarshal(t, left), unmarshal(t, right) + m, err := merge(l, r, strict) + require.NoError(t, err, "merge failed") + + actualBytes, err := yaml.Marshal(m) + require.NoError(t, err, "couldn't marshal merged structure") + actual := canonicalize(t, string(actualBytes)) + expect = canonicalize(t, expect) + if !assert.Equal(t, expect, actual) { + dump(t, actual, expect) + } +} + +func fails(t testing.TB, strict bool, left, right string) { + _, err := merge(unmarshal(t, left), unmarshal(t, right), strict) + assert.Error(t, err, "merge succeeded") +} + +func TestIntegration(t *testing.T) { + base := mustRead(t, "testdata/base.yaml") + prod := mustRead(t, "testdata/production.yaml") + expect := mustRead(t, "testdata/expect.yaml") + + merged, err := YAML([][]byte{base, prod}, true /* strict */) + require.NoError(t, err, "merge failed") + + if !assert.Equal(t, trimcr(string(expect)), merged.String(), "unexpected contents") { + dump(t, merged.String(), string(expect)) + } +} + +func TestEmpty(t *testing.T) { + full := []byte("foo: bar\n") + null := []byte("~") + + tests := []struct { + desc string + sources [][]byte + expect string + }{ + {"empty base", [][]byte{nil, full}, string(full)}, + {"empty override", [][]byte{full, nil}, string(full)}, + {"both empty", [][]byte{nil, nil}, ""}, + {"null base", [][]byte{null, full}, string(full)}, + {"null override", [][]byte{full, null}, "null\n"}, + {"empty base and null override", [][]byte{nil, null}, "null\n"}, + {"null base and empty override", [][]byte{null, nil}, "null\n"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.desc, func(t *testing.T) { + merged, err := YAML(tt.sources, true /* strict */) + require.NoError(t, err, "merge failed") + assert.Equal(t, tt.expect, merged.String(), "wrong contents after merge") + }) + } +} + +func TestSuccess(t *testing.T) { + left := ` +fun: [maserati, porsche] +practical: {toyota: camry, honda: accord} +occupants: + honda: {driver: jane, backseat: [nate]} + ` + right := ` +fun: [lamborghini, porsche] +practical: {honda: civic, nissan: altima} +occupants: + honda: {passenger: arthur, backseat: [nora]} + ` + expect := ` +fun: [lamborghini, porsche] +practical: {toyota: camry, honda: civic, nissan: altima} +occupants: + honda: {passenger: arthur, driver: jane, backseat: [nora]} + ` + succeeds(t, true, left, right, expect) + succeeds(t, false, left, right, expect) +} + +func TestErrors(t *testing.T) { + check := func(t testing.TB, strict bool, sources ...[]byte) error { + _, err := YAML(sources, strict) + return err + } + t.Run("tabs in source", func(t *testing.T) { + src := []byte("foo:\n\tbar:baz") + assert.Error(t, check(t, false, src), "expected error in permissive mode") + assert.Error(t, check(t, true, src), "expected error in strict mode") + }) + + t.Run("duplicated keys", func(t *testing.T) { + src := 
[]byte("{foo: bar, foo: baz}") + assert.NoError(t, check(t, false, src), "expected success in permissive mode") + assert.Error(t, check(t, true, src), "expected error in permissive mode") + }) + + t.Run("merge error", func(t *testing.T) { + left := []byte("foo: [1, 2]") + right := []byte("foo: {bar: baz}") + assert.NoError(t, check(t, false, left, right), "expected success in permissive mode") + assert.Error(t, check(t, true, left, right), "expected error in strict mode") + }) +} + +func TestMismatchedTypes(t *testing.T) { + tests := []struct { + desc string + left, right string + }{ + {"sequence and mapping", "[one, two]", "{foo: bar}"}, + {"sequence and scalar", "[one, two]", "foo"}, + {"mapping and scalar", "{foo: bar}", "foo"}, + {"nested", "{foo: [one, two]}", "{foo: bar}"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.desc+" strict", func(t *testing.T) { + fails(t, true, tt.left, tt.right) + }) + t.Run(tt.desc+" permissive", func(t *testing.T) { + // prefer the higher-priority value + succeeds(t, false, tt.left, tt.right, tt.right) + }) + } +} + +func TestBooleans(t *testing.T) { + // YAML helpfully interprets many strings as Booleans. + tests := []struct { + in, out string + }{ + {"yes", "true"}, + {"YES", "true"}, + {"on", "true"}, + {"ON", "true"}, + {"no", "false"}, + {"NO", "false"}, + {"off", "false"}, + {"OFF", "false"}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.in, func(t *testing.T) { + succeeds(t, true, "", tt.in, tt.out) + succeeds(t, false, "", tt.in, tt.out) + }) + } +} + +func TestExplicitNil(t *testing.T) { + base := `foo: {one: two}` + override := `foo: ~` + expect := `foo: ~` + succeeds(t, true, base, override, expect) + succeeds(t, false, base, override, expect) +} diff --git a/pkg/yamlpatch/patcher.go b/pkg/yamlpatch/patcher.go new file mode 100644 index 0000000..300488e --- /dev/null +++ b/pkg/yamlpatch/patcher.go @@ -0,0 +1,154 @@ +package yamlpatch + +import ( + "bytes" + "io" + "os" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +type Patcher struct { + BaseFilePath string + PatchFilePath string +} + +func NewPatcher(filePath string, suffix string) *Patcher { + return &Patcher{ + BaseFilePath: filePath, + PatchFilePath: filePath + suffix, + } +} + +// read a single YAML file, check for errors (the merge package doesn't) then return the content as bytes. +func readYAML(filePath string) ([]byte, error) { + var content []byte + + var err error + + if content, err = os.ReadFile(filePath); err != nil { + return nil, errors.Wrap(err, "while reading yaml file") + } + + var yamlMap map[interface{}]interface{} + if err = yaml.Unmarshal(content, &yamlMap); err != nil { + return nil, errors.Wrap(err, filePath) + } + + return content, nil +} + +// MergedPatchContent reads a YAML file and, if it exists, its patch file, +// then merges them and returns it serialized. 
+// MergedPatchContent reads a YAML file and, if it exists, its patch file, +// then merges them and returns the result serialized. +func (p *Patcher) MergedPatchContent() ([]byte, error) { + var err error + + var base []byte + + base, err = readYAML(p.BaseFilePath) + if err != nil { + return nil, err + } + + var over []byte + + over, err = readYAML(p.PatchFilePath) + // optional file, ignore if it does not exist + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + if err == nil { + log.Infof("Patching yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath) + } + + var patched *bytes.Buffer + + // strict mode (true) will raise errors for duplicate map keys and + // for overriding with a different type + patched, err = YAML([][]byte{base, over}, true) + if err != nil { + return nil, err + } + + return patched.Bytes(), nil +} + +// decodeDocuments reads multiple YAML documents inside a file and writes them +// to a buffer, separated by the appropriate '---' terminators. +func decodeDocuments(file *os.File, buf *bytes.Buffer, finalDashes bool) error { + var ( + err error + docBytes []byte + ) + + dec := yaml.NewDecoder(file) + dec.SetStrict(true) + + dashTerminator := false + + for { + yml := make(map[interface{}]interface{}) + + err = dec.Decode(&yml) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return errors.Wrapf(err, "while decoding %s", file.Name()) + } + + docBytes, err = yaml.Marshal(&yml) + if err != nil { + return errors.Wrapf(err, "while marshaling %s", file.Name()) + } + + if dashTerminator { + buf.Write([]byte("---\n")) + } + + buf.Write(docBytes) + dashTerminator = true + } + if dashTerminator && finalDashes { + buf.Write([]byte("---\n")) + } + return nil +} + +// PrependedPatchContent collates the base .yaml file with the .yaml.patch, by putting +// the content of the patch BEFORE the base document. The result is a multi-document +// YAML in all cases, even if the base and patch files are single documents. +func (p *Patcher) PrependedPatchContent() ([]byte, error) { + var ( + result bytes.Buffer + patchFile *os.File + baseFile *os.File + err error + ) + + patchFile, err = os.Open(p.PatchFilePath) + // optional file, ignore if it does not exist + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, errors.Wrapf(err, "while opening %s", p.PatchFilePath) + } + + if patchFile != nil { + if err = decodeDocuments(patchFile, &result, true); err != nil { + return nil, err + } + log.Infof("Prepending yaml: '%s' with '%s'", p.BaseFilePath, p.PatchFilePath) + } + + baseFile, err = os.Open(p.BaseFilePath) + if err != nil { + return nil, errors.Wrapf(err, "while opening %s", p.BaseFilePath) + } + + if err = decodeDocuments(baseFile, &result, false); err != nil { + return nil, err + } + + return result.Bytes(), nil +} diff --git a/pkg/yamlpatch/patcher_test.go b/pkg/yamlpatch/patcher_test.go new file mode 100644 index 0000000..be4a855 --- /dev/null +++ b/pkg/yamlpatch/patcher_test.go @@ -0,0 +1,313 @@ +package yamlpatch_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/yamlpatch" + "github.com/stretchr/testify/require" +) + +// similar to the one in cstest, but with the test number too. We cannot import +// cstest here because of a circular dependency.
+func requireErrorContains(t *testing.T, err error, expectedErr string) { + t.Helper() + + if expectedErr != "" { + require.ErrorContains(t, err, expectedErr) + + return + } + + require.NoError(t, err) +} + +func TestMergedPatchContent(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + base string + patch string + expected string + expectedErr string + }{ + { + "invalid yaml in base", + "notayaml", + "", + "", + "config.yaml: yaml: unmarshal errors:", + }, + { + "invalid yaml in base (detailed message)", + "notayaml", + "", + "", + "cannot unmarshal !!str `notayaml`", + }, + { + "invalid yaml in patch", + "", + "notayaml", + "", + "config.yaml.local: yaml: unmarshal errors:", + }, + { + "invalid yaml in patch (detailed message)", + "", + "notayaml", + "", + "cannot unmarshal !!str `notayaml`", + }, + { + "basic merge", + "{'first':{'one':1,'two':2},'second':{'three':3}}", + "{'first':{'one':10,'dos':2}}", + "{'first':{'one':10,'dos':2,'two':2},'second':{'three':3}}", + "", + }, + + // bools and zero values; here the "mergo" package had issues + // so we used something simpler. + + { + "bool merge - off if false", + "bool: on", + "bool: off", + "bool: false", + "", + }, + { + "bool merge - on is true", + "bool: off", + "bool: on", + "bool: true", + "", + }, + { + "string is not a bool - on to off", + "{'bool': 'on'}", + "{'bool': 'off'}", + "{'bool': 'off'}", + "", + }, + { + "string is not a bool - off to on", + "{'bool': 'off'}", + "{'bool': 'on'}", + "{'bool': 'on'}", + "", + }, + { + "bool merge - true to false", + "{'bool': true}", + "{'bool': false}", + "{'bool': false}", + "", + }, + { + "bool merge - false to true", + "{'bool': false}", + "{'bool': true}", + "{'bool': true}", + "", + }, + { + "string merge - value to value", + "{'string': 'value'}", + "{'string': ''}", + "{'string': ''}", + "", + }, + { + "sequence merge - value to empty", + "{'sequence': [1, 2]}", + "{'sequence': []}", + "{'sequence': []}", + "", + }, + { + "map merge - value to value", + "{'map': {'one': 1, 'two': 2}}", + "{'map': {}}", + "{'map': {'one': 1, 'two': 2}}", + "", + }, + + // mismatched types + + { + "can't merge a sequence into a mapping", + "map: {'key': 'value'}", + "map: ['value1', 'value2']", + "", + "can't merge a sequence into a mapping", + }, + { + "can't merge a scalar into a mapping", + "map: {'key': 'value'}", + "map: 3", + "", + "can't merge a scalar into a mapping", + }, + { + "can't merge a mapping into a sequence", + "sequence: ['value1', 'value2']", + "sequence: {'key': 'value'}", + "", + "can't merge a mapping into a sequence", + }, + { + "can't merge a scalar into a sequence", + "sequence: ['value1', 'value2']", + "sequence: 3", + "", + "can't merge a scalar into a sequence", + }, + { + "can't merge a sequence into a scalar", + "scalar: true", + "scalar: ['value1', 'value2']", + "", + "can't merge a sequence into a scalar", + }, + { + "can't merge a mapping into a scalar", + "scalar: true", + "scalar: {'key': 'value'}", + "", + "can't merge a mapping into a scalar", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + dirPath, err := os.MkdirTemp("", "yamlpatch") + require.NoError(t, err) + + defer os.RemoveAll(dirPath) + + configPath := filepath.Join(dirPath, "config.yaml") + patchPath := filepath.Join(dirPath, "config.yaml.local") + err = os.WriteFile(configPath, []byte(tc.base), 0o600) + require.NoError(t, err) + + err = os.WriteFile(patchPath, []byte(tc.patch), 0o600) + require.NoError(t, err) + + patcher := 
yamlpatch.NewPatcher(configPath, ".local") + patchedBytes, err := patcher.MergedPatchContent() + requireErrorContains(t, err, tc.expectedErr) + require.YAMLEq(t, tc.expected, string(patchedBytes)) + }) + } +} + +func TestPrependedPatchContent(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + base string + patch string + expected string + expectedErr string + }{ + // we test with scalars here, because YAMLeq does not work + // with multi-document files, so we need char-to-char comparison + // which is noisy with sequences and (unordered) mappings + { + "newlines are always appended, if missing, by yaml.Marshal()", + "foo: bar", + "", + "foo: bar\n", + "", + }, + { + "prepend empty document", + "foo: bar\n", + "", + "foo: bar\n", + "", + }, + { + "prepend a document to another", + "foo: bar", + "baz: qux", + "baz: qux\n---\nfoo: bar\n", + "", + }, + { + "prepend document with same key", + "foo: true", + "foo: false", + "foo: false\n---\nfoo: true\n", + "", + }, + { + "prepend multiple documents", + "one: 1\n---\ntwo: 2\n---\none: 3", + "four: 4\n---\none: 1.1", + "four: 4\n---\none: 1.1\n---\none: 1\n---\ntwo: 2\n---\none: 3\n", + "", + }, + { + "invalid yaml in base", + "blablabla", + "", + "", + "config.yaml: yaml: unmarshal errors:", + }, + { + "invalid yaml in base (detailed message)", + "blablabla", + "", + "", + "cannot unmarshal !!str `blablabla`", + }, + { + "invalid yaml in patch", + "", + "blablabla", + "", + "config.yaml.local: yaml: unmarshal errors:", + }, + { + "invalid yaml in patch (detailed message)", + "", + "blablabla", + "", + "cannot unmarshal !!str `blablabla`", + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + dirPath, err := os.MkdirTemp("", "yamlpatch") + require.NoError(t, err) + + defer os.RemoveAll(dirPath) + + configPath := filepath.Join(dirPath, "config.yaml") + patchPath := filepath.Join(dirPath, "config.yaml.local") + + err = os.WriteFile(configPath, []byte(tc.base), 0o600) + require.NoError(t, err) + + err = os.WriteFile(patchPath, []byte(tc.patch), 0o600) + require.NoError(t, err) + + patcher := yamlpatch.NewPatcher(configPath, ".local") + patchedBytes, err := patcher.PrependedPatchContent() + requireErrorContains(t, err, tc.expectedErr) + // YAMLeq does not handle multiple documents + require.Equal(t, tc.expected, string(patchedBytes)) + }) + } +} diff --git a/pkg/yamlpatch/testdata/base.yaml b/pkg/yamlpatch/testdata/base.yaml new file mode 100644 index 0000000..4ac551a --- /dev/null +++ b/pkg/yamlpatch/testdata/base.yaml @@ -0,0 +1,13 @@ +fun: + - maserati + - porsche + +practical: + toyota: camry + honda: accord + +occupants: + honda: + driver: jane + backseat: + - nate diff --git a/pkg/yamlpatch/testdata/expect.yaml b/pkg/yamlpatch/testdata/expect.yaml new file mode 100644 index 0000000..c190915 --- /dev/null +++ b/pkg/yamlpatch/testdata/expect.yaml @@ -0,0 +1,13 @@ +fun: +- lamborghini +- porsche +occupants: + honda: + backseat: + - nora + driver: jane + passenger: arthur +practical: + honda: civic + nissan: altima + toyota: camry diff --git a/pkg/yamlpatch/testdata/production.yaml b/pkg/yamlpatch/testdata/production.yaml new file mode 100644 index 0000000..7dab2ae --- /dev/null +++ b/pkg/yamlpatch/testdata/production.yaml @@ -0,0 +1,13 @@ +fun: + - lamborghini + - porsche + +practical: + honda: civic + nissan: altima + +occupants: + honda: + passenger: arthur + backseat: + - nora diff --git a/platform/freebsd.mk b/platform/freebsd.mk new file mode 100644 index 0000000..48ccdc5 
--- /dev/null +++ b/platform/freebsd.mk @@ -0,0 +1,6 @@ +# FreeBSD specific +# + +Make=gmake + +$(info building for FreeBSD) \ No newline at end of file diff --git a/platform/linux.mk b/platform/linux.mk new file mode 100644 index 0000000..0c31e88 --- /dev/null +++ b/platform/linux.mk @@ -0,0 +1,5 @@ +# Linux specific + +MAKE=make + +$(info Building for linux) \ No newline at end of file diff --git a/platform/openbsd.mk b/platform/openbsd.mk new file mode 100644 index 0000000..def3775 --- /dev/null +++ b/platform/openbsd.mk @@ -0,0 +1,6 @@ +# OpenBSD specific +# + +Make=gmake + +$(info building for OpenBSD) diff --git a/platform/unix_common.mk b/platform/unix_common.mk new file mode 100644 index 0000000..3ce3d97 --- /dev/null +++ b/platform/unix_common.mk @@ -0,0 +1,22 @@ + +RM=rm -rf +CP=cp +CPR=cp -r +MKDIR=mkdir -p + +# Go should not be required to run functional tests +GOOS ?= $(shell command -v go >/dev/null && go env GOOS) +GOARCH ?= $(shell command -v go >/dev/null && go env GOARCH) + +GO_MAJOR_VERSION = $(shell command -v go >/dev/null && go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f1) +GO_MINOR_VERSION = $(shell command -v go >/dev/null && go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) + +BUILD_GOVERSION="$(shell command -v go >/dev/null && go version | cut -d " " -f3 | sed -E 's/[go]+//g')" + +#Current versioning information from env +BUILD_VERSION?="$(shell git describe --tags $$(git rev-list --tags --max-count=1))" +BUILD_CODENAME="alphaga" +BUILD_TIMESTAMP=$(shell date +%F"_"%T) +BUILD_TAG?="$(shell git rev-parse HEAD)" +DEFAULT_CONFIGDIR?=/etc/crowdsec +DEFAULT_DATADIR?=/var/lib/crowdsec/data diff --git a/platform/windows.mk b/platform/windows.mk new file mode 100644 index 0000000..0370859 --- /dev/null +++ b/platform/windows.mk @@ -0,0 +1,35 @@ +# Windows specific +# + +MAKE=make +GOOS=windows +PREFIX=$(shell $$env:TEMP) + +GOOS ?= $(shell go env GOOS) +GOARCH ?= $(shell go env GOARCH) + +GO_MAJOR_VERSION ?= $(shell (go env GOVERSION).replace("go","").split(".")[0]) +GO_MINOR_VERSION ?= $(shell (go env GOVERSION).replace("go","").split(".")[1]) +MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1 +MINIMUM_SUPPORTED_GO_MINOR_VERSION = 17 +GO_VERSION_VALIDATION_ERR_MSG = Golang version ($(BUILD_GOVERSION)) is not supported, please use at least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION) +#Current versioning information from env +#BUILD_VERSION?=$(shell (Invoke-WebRequest -UseBasicParsing -Uri https://api.github.com/repos/crowdsecurity/crowdsec/releases/latest).Content | jq -r '.tag_name') +#hardcode it until I find a workaround +BUILD_VERSION?=$(shell git describe --tags $$(git rev-list --tags --max-count=1)) +BUILD_GOVERSION?=$(shell (go env GOVERSION).replace("go","")) +BUILD_CODENAME?=alphaga +BUILD_TIMESTAMP?=$(shell Get-Date -Format "yyyy-MM-dd_HH:mm:ss") +BUILD_TAG?=$(shell git rev-parse HEAD) +DEFAULT_CONFIGDIR?=C:\\ProgramData\\CrowdSec\\config +DEFAULT_DATADIR?=C:\\ProgramData\\CrowdSec\\data + +#please tell me there is a better way to completely ignore errors when trying to delete a file....
+RM=Remove-Item -ErrorAction Ignore -Recurse +CP=Copy-Item +CPR=Copy-Item -Recurse +MKDIR=New-Item -ItemType directory +WIN_IGNORE_ERR=; exit 0 + + +$(info Building for windows) diff --git a/plugins/notifications/dummy/LICENSE b/plugins/notifications/dummy/LICENSE new file mode 100644 index 0000000..9125638 --- /dev/null +++ b/plugins/notifications/dummy/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Crowdsec + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/plugins/notifications/dummy/Makefile b/plugins/notifications/dummy/Makefile new file mode 100644 index 0000000..e47c2fa --- /dev/null +++ b/plugins/notifications/dummy/Makefile @@ -0,0 +1,20 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = notification-dummy$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/plugins/notifications/dummy/dummy.yaml b/plugins/notifications/dummy/dummy.yaml new file mode 100644 index 0000000..060deb6 --- /dev/null +++ b/plugins/notifications/dummy/dummy.yaml @@ -0,0 +1,28 @@ +type: dummy # Don't change +name: dummy_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Number of alerts that triggers a message before group_wait has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the logs and to a text file, if defined +format: | + {{.|toJson}} + +# +# output_file: # notifications will be appended here. optional + +--- + +# type: dummy +# name: dummy_second_notification +# ...
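The format field above is rendered by crowdsec itself; the plugin process only receives the resulting string in notification.Text. A minimal sketch of that rendering step, with a stand-in Alert type and a hypothetical renderFormat helper (the real code lives in pkg/csplugin and exposes more template functions than just toJson):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "text/template"
    )

    // Alert is a stand-in for models.Alert, trimmed to one field.
    type Alert struct {
        Scenario string `json:"scenario"`
    }

    // renderFormat is hypothetical: parse the format template with a toJson
    // helper, then execute it against the alert list.
    func renderFormat(format string, alerts []Alert) (string, error) {
        funcs := template.FuncMap{
            "toJson": func(v interface{}) string {
                b, _ := json.Marshal(v)
                return string(b)
            },
        }
        tmpl, err := template.New("format").Funcs(funcs).Parse(format)
        if err != nil {
            return "", err
        }
        var buf bytes.Buffer
        if err := tmpl.Execute(&buf, alerts); err != nil {
            return "", err
        }
        return buf.String(), nil
    }

    func main() {
        out, err := renderFormat("{{.|toJson}}", []Alert{{Scenario: "crowdsecurity/ssh-bf"}})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // [{"scenario":"crowdsecurity/ssh-bf"}]
    }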
+ diff --git a/plugins/notifications/dummy/main.go b/plugins/notifications/dummy/main.go new file mode 100644 index 0000000..ef8d29f --- /dev/null +++ b/plugins/notifications/dummy/main.go @@ -0,0 +1,88 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "gopkg.in/yaml.v2" +) + +type PluginConfig struct { + Name string `yaml:"name"` + LogLevel *string `yaml:"log_level"` + OutputFile *string `yaml:"output_file"` +} + +type DummyPlugin struct { + PluginConfigByName map[string]PluginConfig +} + +var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "dummy-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + if _, ok := s.PluginConfigByName[notification.Name]; !ok { + return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) + } + cfg := s.PluginConfigByName[notification.Name] + + if cfg.LogLevel != nil && *cfg.LogLevel != "" { + logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) + } + + logger.Info(fmt.Sprintf("received signal for %s config", notification.Name)) + logger.Debug(notification.Text) + + if cfg.OutputFile != nil && *cfg.OutputFile != "" { + f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logger.Error(fmt.Sprintf("Cannot open notification file: %s", err)) + } else { + // only write if the open succeeded, and close the file exactly once + if _, err := f.WriteString(notification.Text + "\n"); err != nil { + logger.Error(fmt.Sprintf("Cannot write notification to file: %s", err)) + } + if err := f.Close(); err != nil { + logger.Error(fmt.Sprintf("Cannot close notification file: %s", err)) + } + } + } + fmt.Println(notification.Text) + + return &protobufs.Empty{}, nil +} + +func (s *DummyPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{} + if err := yaml.Unmarshal(config.Config, &d); err != nil { + return &protobufs.Empty{}, err + } + // register the config only once it has parsed cleanly + s.PluginConfigByName[d.Name] = d + return &protobufs.Empty{}, nil +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + sp := &DummyPlugin{PluginConfigByName: make(map[string]PluginConfig)} + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "dummy": &protobufs.NotifierPlugin{ + Impl: sp, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/plugins/notifications/email/LICENSE b/plugins/notifications/email/LICENSE new file mode 100644 index 0000000..9125638 --- /dev/null +++ b/plugins/notifications/email/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Crowdsec + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/plugins/notifications/email/Makefile b/plugins/notifications/email/Makefile new file mode 100644 index 0000000..e80b4a7 --- /dev/null +++ b/plugins/notifications/email/Makefile @@ -0,0 +1,20 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = notification-email$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/plugins/notifications/email/email.yaml b/plugins/notifications/email/email.yaml new file mode 100644 index 0000000..e2a1712 --- /dev/null +++ b/plugins/notifications/email/email.yaml @@ -0,0 +1,45 @@ +type: email # Don't change +name: email_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Number of alerts that triggers a message before group_wait has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +timeout: 20s # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the email message body +format: | + {{range . -}} + {{$alert := . -}} + {{range .Decisions -}} +

{{.Value}} will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine {{$alert.MachineID}}.
CrowdSec CTI
+ {{end -}} + {{end -}} + +smtp_host: # example: smtp.gmail.com +smtp_username: # Replace with your actual username +smtp_password: # Replace with your actual password +smtp_port: # Common values are any of [25, 465, 587, 2525] +auth_type: # Valid choices are "none", "crammd5", "login", "plain" +sender_name: "CrowdSec" +sender_email: # example: foo@gmail.com +email_subject: "CrowdSec Notification" +receiver_emails: +# - email1@gmail.com +# - email2@gmail.com + +# One of "ssltls", "none" +encryption_type: ssltls + +--- + +# type: email +# name: email_second_notification +# ... + diff --git a/plugins/notifications/email/go.mod b/plugins/notifications/email/go.mod new file mode 100644 index 0000000..3d374e6 --- /dev/null +++ b/plugins/notifications/email/go.mod @@ -0,0 +1,27 @@ +module github.com/crowdsecurity/email-plugin + +go 1.19 + +require ( + github.com/crowdsecurity/crowdsec v1.4.1 + github.com/hashicorp/go-hclog v1.0.0 + github.com/hashicorp/go-plugin v1.4.3 + github.com/xhit/go-simple-mail/v2 v2.10.0 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect + golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + google.golang.org/grpc v1.45.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect +) diff --git a/plugins/notifications/email/go.sum b/plugins/notifications/email/go.sum new file mode 100644 index 0000000..f7c474f --- /dev/null +++ b/plugins/notifications/email/go.sum @@ -0,0 +1,185 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/crowdsecurity/crowdsec v1.4.1 h1:GNmOO3Thh710hSYEW0H+7BJCkMsrpafnM6et4cezxAc= +github.com/crowdsecurity/crowdsec v1.4.1/go.mod h1:du34G8w0vTwVucLoPoI5s1SiZoA7a8ZDAYlzV0ZInRM= +github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 
h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942 h1:t0lM6y/M5IiUZyvbBTcngso8SZEZICH7is9B6g/obVU= +github.com/xhit/go-simple-mail/v2 v2.10.0 h1:nib6RaJ4qVh5HD9UE9QJqnUZyWp3upv+Z6CFxaMj0V8= +github.com/xhit/go-simple-mail/v2 v2.10.0/go.mod 
h1:kA1XbQfCI4JxQ9ccSN6VFyIEkkugOm7YiPkA5hKiQn4= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 h1:6mzvA99KwZxbOrxww4EvWVQUnN1+xEu9tafK5ZxkYeA= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a h1:N2T1jUrTQE9Re6TFF5PhvEHXHCguynGhKjWVsIUt5cY= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/notifications/email/main.go b/plugins/notifications/email/main.go new file mode 100644 index 0000000..ac09c1e --- /dev/null +++ b/plugins/notifications/email/main.go @@ -0,0 +1,149 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + mail "github.com/xhit/go-simple-mail/v2" + "gopkg.in/yaml.v2" +) + +var baseLogger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "email-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +var AuthStringToType map[string]mail.AuthType = map[string]mail.AuthType{ + "none": mail.AuthNone, + "crammd5": mail.AuthCRAMMD5, + "login": mail.AuthLogin, + "plain": mail.AuthPlain, +} + +var EncryptionStringToType 
map[string]mail.Encryption = map[string]mail.Encryption{ + "ssltls": mail.EncryptionSSLTLS, + "starttls": mail.EncryptionSTARTTLS, + "none": mail.EncryptionNone, +} + +type PluginConfig struct { + Name string `yaml:"name"` + LogLevel *string `yaml:"log_level"` + + SMTPHost string `yaml:"smtp_host"` + SMTPPort int `yaml:"smtp_port"` + SMTPUsername string `yaml:"smtp_username"` + SMTPPassword string `yaml:"smtp_password"` + SenderEmail string `yaml:"sender_email"` + SenderName string `yaml:"sender_name"` + ReceiverEmails []string `yaml:"receiver_emails"` + EmailSubject string `yaml:"email_subject"` + EncryptionType string `yaml:"encryption_type"` + AuthType string `yaml:"auth_type"` + HeloHost string `yaml:"helo_host"` +} + +type EmailPlugin struct { + ConfigByName map[string]PluginConfig +} + +func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{ + SMTPPort: 25, + SenderName: "Crowdsec", + EmailSubject: "Crowdsec notification", + EncryptionType: "ssltls", + AuthType: "login", + SenderEmail: "crowdsec@crowdsec.local", + HeloHost: "localhost", + } + + if err := yaml.Unmarshal(config.Config, &d); err != nil { + return nil, err + } + + if d.Name == "" { + return nil, fmt.Errorf("name is required") + } + + if d.SMTPHost == "" { + return nil, fmt.Errorf("SMTP host is not set") + } + + if len(d.ReceiverEmails) == 0 { + return nil, fmt.Errorf("receiver emails are not set") + } + + n.ConfigByName[d.Name] = d + baseLogger.Debug(fmt.Sprintf("Email plugin '%s' uses SMTP host '%s:%d'", d.Name, d.SMTPHost, d.SMTPPort)) + return &protobufs.Empty{}, nil +} + +func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + if _, ok := n.ConfigByName[notification.Name]; !ok { + return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) + } + cfg := n.ConfigByName[notification.Name] + + logger := baseLogger.Named(cfg.Name) + + if cfg.LogLevel != nil && *cfg.LogLevel != "" { + logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) + } + + logger.Debug("got notification") + + server := mail.NewSMTPClient() + server.Host = cfg.SMTPHost + server.Port = cfg.SMTPPort + server.Username = cfg.SMTPUsername + server.Password = cfg.SMTPPassword + server.Encryption = EncryptionStringToType[cfg.EncryptionType] + server.Authentication = AuthStringToType[cfg.AuthType] + server.Helo = cfg.HeloHost + + logger.Debug("making smtp connection") + smtpClient, err := server.Connect() + if err != nil { + return &protobufs.Empty{}, err + } + logger.Debug("smtp connection done") + + email := mail.NewMSG() + email.SetFrom(fmt.Sprintf("%s <%s>", cfg.SenderName, cfg.SenderEmail)). + AddTo(cfg.ReceiverEmails...).
+ SetSubject(cfg.EmailSubject) + email.SetBody(mail.TextHTML, notification.Text) + + err = email.Send(smtpClient) + if err != nil { + return &protobufs.Empty{}, err + } + logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails)) + return &protobufs.Empty{}, nil +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "email": &protobufs.NotifierPlugin{ + Impl: &EmailPlugin{ConfigByName: make(map[string]PluginConfig)}, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: baseLogger, + }) +} diff --git a/plugins/notifications/http/LICENSE b/plugins/notifications/http/LICENSE new file mode 100644 index 0000000..9125638 --- /dev/null +++ b/plugins/notifications/http/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Crowdsec + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
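One detail worth calling out in the email plugin's Configure above: the PluginConfig is seeded with defaults (SMTP port 25, ssltls encryption, login auth, ...) before yaml.Unmarshal runs. gopkg.in/yaml.v2 only overwrites struct fields whose keys actually appear in the document, so unset keys keep their defaults. A minimal sketch of that behavior, with the field set trimmed for brevity:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type PluginConfig struct {
        Name     string `yaml:"name"`
        SMTPPort int    `yaml:"smtp_port"`
        AuthType string `yaml:"auth_type"`
    }

    func main() {
        // seed the defaults first, as the plugin's Configure does
        cfg := PluginConfig{SMTPPort: 25, AuthType: "login"}
        doc := []byte("name: email_default\nsmtp_port: 587\n")
        if err := yaml.Unmarshal(doc, &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg)
        // {Name:email_default SMTPPort:587 AuthType:login}
    }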
diff --git a/plugins/notifications/http/Makefile b/plugins/notifications/http/Makefile new file mode 100644 index 0000000..8d0592f --- /dev/null +++ b/plugins/notifications/http/Makefile @@ -0,0 +1,20 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = notification-http$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/plugins/notifications/http/go.mod b/plugins/notifications/http/go.mod new file mode 100644 index 0000000..dc3c68c --- /dev/null +++ b/plugins/notifications/http/go.mod @@ -0,0 +1,26 @@ +module github.com/crowdsecurity/http-plugin + +go 1.19 + +require ( + github.com/crowdsecurity/crowdsec v1.4.1 + github.com/hashicorp/go-hclog v1.0.0 + github.com/hashicorp/go-plugin v1.4.2 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect + golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + google.golang.org/grpc v1.45.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect +) diff --git a/plugins/notifications/http/go.sum b/plugins/notifications/http/go.sum new file mode 100644 index 0000000..c9aba04 --- /dev/null +++ b/plugins/notifications/http/go.sum @@ -0,0 +1,183 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/crowdsecurity/crowdsec v1.4.1 h1:GNmOO3Thh710hSYEW0H+7BJCkMsrpafnM6et4cezxAc= +github.com/crowdsecurity/crowdsec v1.4.1/go.mod h1:du34G8w0vTwVucLoPoI5s1SiZoA7a8ZDAYlzV0ZInRM= +github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 
h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-plugin v1.4.2 h1:yFvG3ufXXpqiMiZx9HLcaK3XbIqQ1WJFR/F1a2CuVw0= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942 h1:t0lM6y/M5IiUZyvbBTcngso8SZEZICH7is9B6g/obVU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 h1:6mzvA99KwZxbOrxww4EvWVQUnN1+xEu9tafK5ZxkYeA= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a h1:N2T1jUrTQE9Re6TFF5PhvEHXHCguynGhKjWVsIUt5cY= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/plugins/notifications/http/http.yaml b/plugins/notifications/http/http.yaml
new file mode 100644
index 0000000..6a70d1b
--- /dev/null
+++ b/plugins/notifications/http/http.yaml
@@ -0,0 +1,36 @@
+type: http # Don't change
+name: http_default # Must match the registered plugin in the profile
+
+# One of "trace", "debug", "info", "warn", "error", "off"
+log_level: info
+
+# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s"
+# group_threshold: # Number of alerts that triggers a message before <group_wait> has expired, eg "10"
+# max_retry: # Number of attempts to relay messages to plugins in case of error
+# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s"
+
+#-------------------------
+# plugin-specific options
+
+# The following template receives a list of models.Alert objects
+# The output goes in the http request body
+format: |
+  {{.|toJson}}
+
+# The plugin will make requests to this url, eg: https://www.example.com/
+url:
+
+# Any of the http verbs: "POST", "GET", "PUT"...
+method: POST
+
+# headers:
+#   Authorization: token 0x64312313
+
+# skip_tls_verification: # true or false. Default is false
+
+---
+
+# type: http
+# name: http_second_notification
+# ...
+
diff --git a/plugins/notifications/http/main.go b/plugins/notifications/http/main.go
new file mode 100644
index 0000000..7e15fcc
--- /dev/null
+++ b/plugins/notifications/http/main.go
@@ -0,0 +1,118 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/crowdsecurity/crowdsec/pkg/protobufs"
+	"github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"gopkg.in/yaml.v2"
+)
+
+type PluginConfig struct {
+	Name                string            `yaml:"name"`
+	URL                 string            `yaml:"url"`
+	Headers             map[string]string `yaml:"headers"`
+	SkipTLSVerification bool              `yaml:"skip_tls_verification"`
+	Method              string            `yaml:"method"`
+	LogLevel            *string           `yaml:"log_level"`
+}
+
+type HTTPPlugin struct {
+	PluginConfigByName map[string]PluginConfig
+}
+
+var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{
+	Name:       "http-plugin",
+	Level:      hclog.LevelFromString("INFO"),
+	Output:     os.Stderr,
+	JSONFormat: true,
+})
+
+func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) {
+	if _, ok := s.PluginConfigByName[notification.Name]; !ok {
+		return nil, fmt.Errorf("invalid plugin config name %s", notification.Name)
+	}
+	cfg := s.PluginConfigByName[notification.Name]
+
+	if cfg.LogLevel != nil && *cfg.LogLevel != "" {
+		logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel))
+	}
+
+	logger.Info(fmt.Sprintf("received signal for %s config", notification.Name))
+	client := http.Client{}
+
+	if cfg.SkipTLSVerification {
+		client.Transport = &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+		}
+	}
+
+	request, err := http.NewRequest(cfg.Method, cfg.URL, bytes.NewReader([]byte(notification.Text)))
+	if err != nil {
+		return nil, err
+	}
+
+	for headerName, headerValue := range cfg.Headers {
+		logger.Debug(fmt.Sprintf("adding header %s: %s", headerName, headerValue))
+		request.Header.Add(headerName, headerValue)
+	}
+	logger.Debug(fmt.Sprintf("making HTTP %s call to %s with body %s", cfg.Method, cfg.URL, string(notification.Text)))
+	resp, err := client.Do(request)
+	if err != nil {
+		logger.Error(fmt.Sprintf("failed to make HTTP request: %s", err))
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	respData, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read response body: %s", err)
+	}
+
+	logger.Debug(fmt.Sprintf("got response %s", string(respData)))
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		logger.Warn(fmt.Sprintf("HTTP server returned non-2xx status code: %d", resp.StatusCode))
+		return &protobufs.Empty{}, nil
+	}
+
+	return &protobufs.Empty{}, nil
+}
+
+func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) {
+	d := PluginConfig{}
+	// Reject a config that cannot be parsed instead of registering it under an empty name.
+	if err := yaml.Unmarshal(config.Config, &d); err != nil {
+		return nil, err
+	}
+	s.PluginConfigByName[d.Name] = d
+	logger.Debug(fmt.Sprintf("HTTP plugin '%s' uses URL '%s'", d.Name, d.URL))
+	return &protobufs.Empty{}, nil
+}
+
+func main() {
+	var handshake = plugin.HandshakeConfig{
+		ProtocolVersion:  1,
+		MagicCookieKey:   "CROWDSEC_PLUGIN_KEY",
+		MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"),
+	}
+
+	sp := &HTTPPlugin{PluginConfigByName: make(map[string]PluginConfig)}
+	plugin.Serve(&plugin.ServeConfig{
+		HandshakeConfig: handshake,
+		Plugins: 
map[string]plugin.Plugin{ + "http": &protobufs.NotifierPlugin{ + Impl: sp, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/plugins/notifications/slack/LICENSE b/plugins/notifications/slack/LICENSE new file mode 100644 index 0000000..9125638 --- /dev/null +++ b/plugins/notifications/slack/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Crowdsec + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/plugins/notifications/slack/Makefile b/plugins/notifications/slack/Makefile new file mode 100644 index 0000000..406b687 --- /dev/null +++ b/plugins/notifications/slack/Makefile @@ -0,0 +1,20 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = notification-slack$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/plugins/notifications/slack/go.mod b/plugins/notifications/slack/go.mod new file mode 100644 index 0000000..8f415c8 --- /dev/null +++ b/plugins/notifications/slack/go.mod @@ -0,0 +1,29 @@ +module github.com/crowdsecurity/slack-plugin + +go 1.19 + +require ( + github.com/crowdsecurity/crowdsec v1.4.1 + github.com/hashicorp/go-hclog v1.0.0 + github.com/hashicorp/go-plugin v1.4.2 + github.com/slack-go/slack v0.9.2 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/gorilla/websocket v1.4.2 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect + golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + google.golang.org/grpc v1.45.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect +) diff --git a/plugins/notifications/slack/go.sum b/plugins/notifications/slack/go.sum new file mode 100644 index 0000000..7f95cca --- /dev/null +++ b/plugins/notifications/slack/go.sum @@ -0,0 +1,192 @@ 
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/crowdsecurity/crowdsec v1.4.1 h1:GNmOO3Thh710hSYEW0H+7BJCkMsrpafnM6et4cezxAc= +github.com/crowdsecurity/crowdsec v1.4.1/go.mod h1:du34G8w0vTwVucLoPoI5s1SiZoA7a8ZDAYlzV0ZInRM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-plugin v1.4.2 h1:yFvG3ufXXpqiMiZx9HLcaK3XbIqQ1WJFR/F1a2CuVw0= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= 
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/slack-go/slack v0.9.2 h1:tjIrKKYUCOmWeEAktWShKW+3UjLTH/wmgmCkAGAf8wM= +github.com/slack-go/slack v0.9.2/go.mod h1:wWL//kk0ho+FcQXcBTmEafUI5dz4qz5f4mMk8oIkioQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942 h1:t0lM6y/M5IiUZyvbBTcngso8SZEZICH7is9B6g/obVU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 h1:6mzvA99KwZxbOrxww4EvWVQUnN1+xEu9tafK5ZxkYeA= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a h1:N2T1jUrTQE9Re6TFF5PhvEHXHCguynGhKjWVsIUt5cY= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/notifications/slack/main.go b/plugins/notifications/slack/main.go new file mode 100644 index 0000000..9018323 --- /dev/null +++ b/plugins/notifications/slack/main.go @@ -0,0 +1,81 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + + "github.com/slack-go/slack" + "gopkg.in/yaml.v2" +) + +type PluginConfig struct { + Name string `yaml:"name"` + Webhook string `yaml:"webhook"` + LogLevel *string `yaml:"log_level"` +} +type Notify struct { + ConfigByName map[string]PluginConfig +} + +var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "slack-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + if _, ok := n.ConfigByName[notification.Name]; !ok { + return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) + } + cfg := n.ConfigByName[notification.Name] + + if cfg.LogLevel != nil && *cfg.LogLevel != "" { + logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) + } + + logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name)) + logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text)) + err := slack.PostWebhook(n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{ + Text: notification.Text, + }) + if err != nil { + logger.Error(err.Error()) + } + + return &protobufs.Empty{}, err +} + +func (n *Notify) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{} + if err := yaml.Unmarshal(config.Config, &d); err != nil { + return nil, err + } + n.ConfigByName[d.Name] = d + logger.Debug(fmt.Sprintf("Slack plugin '%s' use URL '%s'", d.Name, d.Webhook)) + return &protobufs.Empty{}, nil +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "slack": &protobufs.NotifierPlugin{ 
+				Impl: &Notify{ConfigByName: make(map[string]PluginConfig)},
+			},
+		},
+		GRPCServer: plugin.DefaultGRPCServer,
+		Logger:     logger,
+	})
+}
diff --git a/plugins/notifications/slack/slack.yaml b/plugins/notifications/slack/slack.yaml
new file mode 100644
index 0000000..4768e86
--- /dev/null
+++ b/plugins/notifications/slack/slack.yaml
@@ -0,0 +1,36 @@
+type: slack # Don't change
+name: slack_default # Must match the registered plugin in the profile
+
+# One of "trace", "debug", "info", "warn", "error", "off"
+log_level: info
+
+# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s"
+# group_threshold: # Number of alerts that triggers a message before <group_wait> has expired, eg "10"
+# max_retry: # Number of attempts to relay messages to plugins in case of error
+# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s"
+
+#-------------------------
+# plugin-specific options
+
+# The following template receives a list of models.Alert objects
+# The output goes in the slack message
+format: |
+  {{range . -}}
+  {{$alert := . -}}
+  {{range .Decisions -}}
+  {{if $alert.Source.Cn -}}
+  :flag-{{$alert.Source.Cn}}: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}}
+  {{if not $alert.Source.Cn -}}
+  :pirate_flag: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}}
+  {{end -}}
+  {{end -}}
+
+
+webhook:
+
+---
+
+# type: slack
+# name: slack_second_notification
+# ...
+
diff --git a/plugins/notifications/splunk/LICENSE b/plugins/notifications/splunk/LICENSE
new file mode 100644
index 0000000..9125638
--- /dev/null
+++ b/plugins/notifications/splunk/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Crowdsec
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
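
The slack format template above is plain Go text/template rendered over the list of alerts the plugin receives. For a sense of what it produces, here is a self-contained sketch that renders the same template over stand-in types; the Source, Decision, and Alert structs below mirror only the fields the template touches and are illustrative assumptions, not the real models.Alert from the crowdsec tree:

package main

import (
	"os"
	"text/template"
)

// Stand-in types covering only the fields the slack template references.
type Source struct{ Cn string }
type Decision struct{ Type, Duration, Scenario string }
type Alert struct {
	Source    Source
	MachineID string
	Decisions []Decision
}

// Same template body as slack.yaml's format key.
const format = `{{range . -}}
{{$alert := . -}}
{{range .Decisions -}}
{{if $alert.Source.Cn -}}
:flag-{{$alert.Source.Cn}}: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}}
{{if not $alert.Source.Cn -}}
:pirate_flag: will get {{.Type}} for next {{.Duration}} for triggering {{.Scenario}} on machine '{{$alert.MachineID}}'. {{end}}
{{end -}}
{{end -}}`

func main() {
	// One sample alert with a known source country.
	alerts := []Alert{{
		Source:    Source{Cn: "FR"},
		MachineID: "machine-1",
		Decisions: []Decision{{Type: "ban", Duration: "4h", Scenario: "crowdsecurity/ssh-bf"}},
	}}
	tmpl := template.Must(template.New("slack").Parse(format))
	// Prints roughly: :flag-FR: will get ban for next 4h for triggering
	// crowdsecurity/ssh-bf on machine 'machine-1'.
	if err := tmpl.Execute(os.Stdout, alerts); err != nil {
		panic(err)
	}
}

When the source country is unset, the template falls through to the :pirate_flag: branch instead, so every decision still produces one line in the slack message.
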
diff --git a/plugins/notifications/splunk/Makefile b/plugins/notifications/splunk/Makefile new file mode 100644 index 0000000..6a24b5c --- /dev/null +++ b/plugins/notifications/splunk/Makefile @@ -0,0 +1,20 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +# Go parameters +GOCMD = go +GOBUILD = $(GOCMD) build +GOCLEAN = $(GOCMD) clean +GOTEST = $(GOCMD) test +GOGET = $(GOCMD) get + +BINARY_NAME = notification-splunk$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) $(BUILD_VENDOR_FLAGS) -o $(BINARY_NAME) + +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/plugins/notifications/splunk/go.mod b/plugins/notifications/splunk/go.mod new file mode 100644 index 0000000..9283808 --- /dev/null +++ b/plugins/notifications/splunk/go.mod @@ -0,0 +1,26 @@ +module github.com/crowdsecurity/splunk-plugin + +go 1.19 + +require ( + github.com/crowdsecurity/crowdsec v1.4.1 + github.com/hashicorp/go-hclog v1.0.0 + github.com/hashicorp/go-plugin v1.4.2 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect + golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect + golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect + google.golang.org/grpc v1.45.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect +) diff --git a/plugins/notifications/splunk/go.sum b/plugins/notifications/splunk/go.sum new file mode 100644 index 0000000..c9aba04 --- /dev/null +++ b/plugins/notifications/splunk/go.sum @@ -0,0 +1,183 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/crowdsecurity/crowdsec v1.4.1 h1:GNmOO3Thh710hSYEW0H+7BJCkMsrpafnM6et4cezxAc= +github.com/crowdsecurity/crowdsec v1.4.1/go.mod h1:du34G8w0vTwVucLoPoI5s1SiZoA7a8ZDAYlzV0ZInRM= +github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 
h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-plugin v1.4.2 h1:yFvG3ufXXpqiMiZx9HLcaK3XbIqQ1WJFR/F1a2CuVw0= +github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1-0.20210427113832-6241f9ab9942 h1:t0lM6y/M5IiUZyvbBTcngso8SZEZICH7is9B6g/obVU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 h1:6mzvA99KwZxbOrxww4EvWVQUnN1+xEu9tafK5ZxkYeA= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a h1:N2T1jUrTQE9Re6TFF5PhvEHXHCguynGhKjWVsIUt5cY= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/notifications/splunk/main.go b/plugins/notifications/splunk/main.go new file mode 100644 index 0000000..a9b4be5 --- /dev/null +++ b/plugins/notifications/splunk/main.go @@ -0,0 +1,119 @@ +package main + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + + "gopkg.in/yaml.v2" +) + +var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "splunk-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +type PluginConfig struct { + Name string `yaml:"name"` + URL string `yaml:"url"` + Token string `yaml:"token"` + LogLevel *string `yaml:"log_level"` +} + +type Splunk struct { + PluginConfigByName map[string]PluginConfig + Client http.Client +} + +type Payload struct { + Event string `json:"event"` +} + +func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, 
error) { + if _, ok := s.PluginConfigByName[notification.Name]; !ok { + return &protobufs.Empty{}, fmt.Errorf("splunk invalid config name %s", notification.Name) + } + cfg := s.PluginConfigByName[notification.Name] + + if cfg.LogLevel != nil && *cfg.LogLevel != "" { + logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) + } + + logger.Info(fmt.Sprintf("received notify signal for %s config", notification.Name)) + + p := Payload{Event: notification.Text} + data, err := json.Marshal(p) + if err != nil { + return &protobufs.Empty{}, err + } + + req, err := http.NewRequest("POST", cfg.URL, strings.NewReader(string(data))) + if err != nil { + return &protobufs.Empty{}, err + } + + req.Header.Add("Authorization", fmt.Sprintf("Splunk %s", cfg.Token)) + logger.Debug(fmt.Sprintf("posting event %s to %s", string(data), req.URL)) + resp, err := s.Client.Do(req) + if err != nil { + return &protobufs.Empty{}, err + } + // the body is read on every path below, so close it when done + defer resp.Body.Close() + + if resp.StatusCode != 200 { + content, err := io.ReadAll(resp.Body) + if err != nil { + return &protobufs.Empty{}, fmt.Errorf("got non-200 response and failed to read the body: %s", err) + } + return &protobufs.Empty{}, fmt.Errorf("got non-200 response: %s", string(content)) + } + respData, err := io.ReadAll(resp.Body) + if err != nil { + return &protobufs.Empty{}, fmt.Errorf("failed to read response body: %s", err) + } + logger.Debug(fmt.Sprintf("got response %s", string(respData))) + return &protobufs.Empty{}, nil +} + +func (s *Splunk) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{} + err := yaml.Unmarshal(config.Config, &d) + if err != nil { + // don't register a half-parsed configuration + return &protobufs.Empty{}, err + } + s.PluginConfigByName[d.Name] = d + logger.Debug(fmt.Sprintf("Splunk plugin '%s' uses URL '%s'", d.Name, d.URL)) + return &protobufs.Empty{}, nil +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + + sp := &Splunk{PluginConfigByName: make(map[string]PluginConfig), Client: *client} + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "splunk": &protobufs.NotifierPlugin{ + Impl: sp, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/plugins/notifications/splunk/splunk.yaml b/plugins/notifications/splunk/splunk.yaml new file mode 100644 index 0000000..43ed00b --- /dev/null +++ b/plugins/notifications/splunk/splunk.yaml @@ -0,0 +1,28 @@ +type: splunk # Don't change +name: splunk_default # Must match the registered plugin in the profile + +# One of "trace", "debug", "info", "warn", "error", "off" +log_level: info + +# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" +# group_threshold: # Number of alerts that triggers a message before group_wait has expired, eg "10" +# max_retry: # Number of attempts to relay messages to plugins in case of error +# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s" + +#------------------------- +# plugin-specific options + +# The following template receives a list of models.Alert objects +# The output goes in the splunk notification +format: | + {{.|toJson}} + +url: +token: + +--- + +# type: splunk +# name: splunk_second_notification +# ...
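+
+# For reference, the Notify method in main.go boils down to one authenticated
+# POST against the Splunk HTTP Event Collector. A rough shell equivalent
+# (a sketch only; the endpoint and token below are placeholders, adjust them
+# to your Splunk setup):
+#
+#   TOKEN="00000000-0000-0000-0000-000000000000"
+#   curl -sk "https://localhost:8088/services/collector/event" \
+#     -H "Authorization: Splunk $TOKEN" \
+#     -d '{"event": "test alert from crowdsec"}'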
+ diff --git a/rpm/SOURCES/80-crowdsec.preset b/rpm/SOURCES/80-crowdsec.preset new file mode 100644 index 0000000..13f910d --- /dev/null +++ b/rpm/SOURCES/80-crowdsec.preset @@ -0,0 +1,3 @@ +# This file is part of crowdsec + +enable crowdsec.service \ No newline at end of file diff --git a/rpm/SOURCES/crowdsec.unit.patch b/rpm/SOURCES/crowdsec.unit.patch new file mode 100644 index 0000000..0ca489b --- /dev/null +++ b/rpm/SOURCES/crowdsec.unit.patch @@ -0,0 +1,13 @@ +--- config/crowdsec.service-orig 2022-03-24 09:46:16.581681532 +0000 ++++ config/crowdsec.service 2022-03-24 09:46:28.761681532 +0000 +@@ -5,8 +5,8 @@ + [Service] + Type=notify + Environment=LC_ALL=C LANG=C +-ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t +-ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml ++ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t ++ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml + #ExecStartPost=/bin/sleep 0.1 + ExecReload=/bin/kill -HUP $MAINPID + diff --git a/rpm/SOURCES/user.patch b/rpm/SOURCES/user.patch new file mode 100644 index 0000000..b2df218 --- /dev/null +++ b/rpm/SOURCES/user.patch @@ -0,0 +1,11 @@ +--- config/config.yaml-orig 2021-09-08 12:04:29.758785098 +0200 ++++ config/config.yaml 2021-09-08 12:04:39.866856057 +0200 +@@ -32,7 +32,7 @@ + max_age: 7d + plugin_config: + user: nobody # plugin process would be ran on behalf of this user +- group: nogroup # plugin process would be ran on behalf of this group ++ group: nobody # plugin process would be ran on behalf of this group + api: + client: + insecure_skip_verify: false diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec new file mode 100644 index 0000000..fb6ab4e --- /dev/null +++ b/rpm/SPECS/crowdsec.spec @@ -0,0 +1,237 @@ + +Name: crowdsec +Version: %(echo $VERSION) +Release: %(echo $PACKAGE_NUMBER)%{?dist} +Summary: Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviours. 
It also automatically benefits from our global community-wide IP reputation database + +License: MIT +URL: https://crowdsec.net +Source0: https://github.com/crowdsecurity/%{name}/archive/v%(echo $VERSION).tar.gz +Source1: 80-%{name}.preset +Patch0: crowdsec.unit.patch +Patch1: user.patch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: git +BuildRequires: make +BuildRequires: systemd +Requires: crontabs +%{?fc33:BuildRequires: systemd-rpm-macros} +%{?fc34:BuildRequires: systemd-rpm-macros} +%{?fc35:BuildRequires: systemd-rpm-macros} +%{?fc36:BuildRequires: systemd-rpm-macros} + +%define debug_package %{nil} + +%description + +%define version_number %(echo $VERSION) +%define releasever %(echo $RELEASEVER) +%global local_version v%{version_number}-%{releasever}-rpm +%global name crowdsec +%global __mangle_shebangs_exclude_from /usr/bin/env + +%prep +%setup -q -T -b 0 + +%patch0 +%patch1 + +%build +BUILD_VERSION=%{local_version} make build +sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config/config.yaml + +%install +rm -rf %{buildroot} +mkdir -p %{buildroot}/etc/crowdsec/hub +mkdir -p %{buildroot}/etc/crowdsec/patterns +mkdir -p %{buildroot}%{_sharedstatedir}/%{name}/data +mkdir -p %{buildroot}%{_presetdir} + +mkdir -p %{buildroot}%{_sharedstatedir}/%{name}/plugins +mkdir -p %{buildroot}%{_sysconfdir}/crowdsec/notifications/ +mkdir -p %{buildroot}%{_libdir}/%{name}/plugins/ + + +install -m 755 -D cmd/crowdsec/crowdsec %{buildroot}%{_bindir}/%{name} +install -m 755 -D cmd/crowdsec-cli/cscli %{buildroot}%{_bindir}/cscli +install -m 755 -D wizard.sh %{buildroot}/usr/share/crowdsec/wizard.sh +install -m 644 -D config/crowdsec.service %{buildroot}%{_unitdir}/%{name}.service +install -m 644 -D config/patterns/* -t %{buildroot}%{_sysconfdir}/crowdsec/patterns +install -m 600 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec +install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec +install -m 644 -D config/profiles.yaml %{buildroot}%{_sysconfdir}/crowdsec +install -m 644 -D config/console.yaml %{buildroot}%{_sysconfdir}/crowdsec +install -m 750 -D config/%{name}.cron.daily %{buildroot}%{_sysconfdir}/cron.daily/%{name} +install -m 644 -D %{SOURCE1} %{buildroot}%{_presetdir} + +install -m 551 plugins/notifications/slack/notification-slack %{buildroot}%{_libdir}/%{name}/plugins/ +install -m 551 plugins/notifications/http/notification-http %{buildroot}%{_libdir}/%{name}/plugins/ +install -m 551 plugins/notifications/splunk/notification-splunk %{buildroot}%{_libdir}/%{name}/plugins/ +install -m 551 plugins/notifications/email/notification-email %{buildroot}%{_libdir}/%{name}/plugins/ + +install -m 600 plugins/notifications/slack/slack.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ +install -m 600 plugins/notifications/http/http.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ +install -m 600 plugins/notifications/splunk/splunk.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ +install -m 600 plugins/notifications/email/email.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{_bindir}/%{name} +%{_bindir}/cscli +%{_datadir}/%{name}/wizard.sh +%{_libdir}/%{name}/plugins/notification-slack +%{_libdir}/%{name}/plugins/notification-http +%{_libdir}/%{name}/plugins/notification-splunk +%{_libdir}/%{name}/plugins/notification-email +%{_sysconfdir}/%{name}/patterns/linux-syslog +%{_sysconfdir}/%{name}/patterns/ruby 
+%{_sysconfdir}/%{name}/patterns/nginx +%{_sysconfdir}/%{name}/patterns/junos +%{_sysconfdir}/%{name}/patterns/cowrie_honeypot +%{_sysconfdir}/%{name}/patterns/redis +%{_sysconfdir}/%{name}/patterns/firewalls +%{_sysconfdir}/%{name}/patterns/paths +%{_sysconfdir}/%{name}/patterns/java +%{_sysconfdir}/%{name}/patterns/postgresql +%{_sysconfdir}/%{name}/patterns/bacula +%{_sysconfdir}/%{name}/patterns/mcollective +%{_sysconfdir}/%{name}/patterns/rails +%{_sysconfdir}/%{name}/patterns/haproxy +%{_sysconfdir}/%{name}/patterns/nagios +%{_sysconfdir}/%{name}/patterns/mysql +%{_sysconfdir}/%{name}/patterns/ssh +%{_sysconfdir}/%{name}/patterns/tcpdump +%{_sysconfdir}/%{name}/patterns/exim +%{_sysconfdir}/%{name}/patterns/bro +%{_sysconfdir}/%{name}/patterns/modsecurity +%{_sysconfdir}/%{name}/patterns/aws +%{_sysconfdir}/%{name}/patterns/smb +%{_sysconfdir}/%{name}/patterns/mongodb +%config(noreplace) %{_sysconfdir}/%{name}/config.yaml +%config(noreplace) %{_sysconfdir}/%{name}/simulation.yaml +%config(noreplace) %{_sysconfdir}/%{name}/profiles.yaml +%config(noreplace) %{_sysconfdir}/%{name}/console.yaml +%config(noreplace) %{_presetdir}/80-%{name}.preset +%config(noreplace) %{_sysconfdir}/%{name}/notifications/http.yaml +%config(noreplace) %{_sysconfdir}/%{name}/notifications/slack.yaml +%config(noreplace) %{_sysconfdir}/%{name}/notifications/splunk.yaml +%config(noreplace) %{_sysconfdir}/%{name}/notifications/email.yaml +%config(noreplace) %{_sysconfdir}/cron.daily/%{name} + +%{_unitdir}/%{name}.service + +%ghost %{_sysconfdir}/%{name}/hub/.index.json +%ghost %{_localstatedir}/log/%{name}.log +%dir /var/lib/%{name}/data/ + +%ghost %{_sysconfdir}/crowdsec/local_api_credentials.yaml +%ghost %{_sysconfdir}/crowdsec/online_api_credentials.yaml +%ghost %{_sysconfdir}/crowdsec/acquis.yaml + +%pre + +#systemctl stop crowdsec || true + +if [ $1 == 2 ];then + if [[ ! -d /var/lib/crowdsec/backup ]]; then + cscli config backup /var/lib/crowdsec/backup + fi +fi + + +%post -p /bin/bash + +#install +if [ $1 == 1 ]; then + + if [ ! -f "/var/lib/crowdsec/data/crowdsec.db" ] ; then + touch /var/lib/crowdsec/data/crowdsec.db + fi + + echo $SHELL + . /usr/share/crowdsec/wizard.sh -n + + echo Creating acquisition configuration + if [ ! -f "/etc/crowdsec/acquis.yaml" ] ; then + set +e + SILENT=true detect_services + SILENT=true TMP_ACQUIS_FILE_SKIP=skip genacquisition + set +e + fi + if [ ! -f "%{_sysconfdir}/crowdsec/online_api_credentials.yaml" ] && [ ! -f "%{_sysconfdir}/crowdsec/local_api_credentials.yaml" ] ; then + install -m 600 /dev/null %{_sysconfdir}/crowdsec/online_api_credentials.yaml + install -m 600 /dev/null %{_sysconfdir}/crowdsec/local_api_credentials.yaml + cscli capi register + cscli machines add -a + fi + if [ ! -f "%{_sysconfdir}/crowdsec/online_api_credentials.yaml" ] ; then + touch %{_sysconfdir}/crowdsec/online_api_credentials.yaml + cscli capi register + fi + if [ ! -f "%{_sysconfdir}/crowdsec/local_api_credentials.yaml" ] ; then + touch %{_sysconfdir}/crowdsec/local_api_credentials.yaml + cscli machines add -a + fi + + cscli hub update + CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection + +#upgrade +elif [ $1 == 2 ] && [ -d /var/lib/crowdsec/backup ]; then + cscli config restore /var/lib/crowdsec/backup + if [ $?
== 0 ]; then + rm -rf /var/lib/crowdsec/backup + fi + + if [[ -f %{_sysconfdir}/crowdsec/online_api_credentials.yaml ]] ; then + chmod 600 %{_sysconfdir}/crowdsec/online_api_credentials.yaml + fi + + if [[ -f %{_sysconfdir}/crowdsec/local_api_credentials.yaml ]] ; then + chmod 600 %{_sysconfdir}/crowdsec/local_api_credentials.yaml + fi +fi + +%systemd_post %{name}.service + +if [ $1 == 1 ]; then + API=$(cscli config show --key "Config.API.Server") + if [ "$API" = "" ] ; then + LAPI=false + else + PORT=$(cscli config show --key "Config.API.Server.ListenURI"|cut -d ":" -f2) + fi + if [ "$LAPI" = false ] || [ -z "$(ss -nlt "sport = ${PORT}" | grep -v ^State)" ] ; then + %if 0%{?fc35} || 0%{?fc36} + systemctl enable crowdsec + %endif + systemctl start crowdsec || echo "crowdsec is not started" + else + echo "Not attempting to start crowdsec, port ${PORT} is already used or lapi was disabled" + echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml" + fi +fi + +%preun + +#systemctl stop crowdsec || echo "crowdsec was not started" + +%systemd_preun %{name}.service + +%postun + +%systemd_postun_with_restart %{name}.service + +if [ $1 == 0 ]; then + rm -rf /etc/crowdsec/hub +fi + +#systemctl stop crowdsec || echo "crowdsec was not started" + +%changelog +* Tue Feb 16 2021 Manuel Sabban +- Initial packaging diff --git a/scripts/check_go_version.ps1 b/scripts/check_go_version.ps1 new file mode 100644 index 0000000..ddc68ce --- /dev/null +++ b/scripts/check_go_version.ps1 @@ -0,0 +1,19 @@ +##This must be called with $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) in this order +$min_major=$args[0] +$min_minor=$args[1] +$goversion = (go env GOVERSION).replace("go","").split(".") +$goversion_major=$goversion[0] +$goversion_minor=$goversion[1] +$err_msg="Golang version $goversion_major.$goversion_minor is not supported, please use at least $min_major.$min_minor" + +if ( $goversion_major -gt $min_major ) { + exit 0; +} +elseif ($goversion_major -lt $min_major) { + Write-Output $err_msg; + exit 1; +} +elseif ($goversion_minor -lt $min_minor) { + Write-Output $err_msg; + exit 1; +} \ No newline at end of file diff --git a/scripts/test_env.ps1 b/scripts/test_env.ps1 new file mode 100644 index 0000000..3d8e18a --- /dev/null +++ b/scripts/test_env.ps1 @@ -0,0 +1,90 @@ +#this is a straight-up conversion of test_env.sh, not pretty but it does the job + +param ( + [string]$base = ".\tests", + [switch]$help = $false +) + +function show_help() { + Write-Output ".\test_env.ps1 -base .\tests #creates test env in .\tests" +} + +function create_arbo() { + $null = New-Item -ItemType Directory $data_dir + $null = New-Item -ItemType Directory $log_dir + $null = New-Item -ItemType Directory $config_dir + $null = New-Item -ItemType Directory $parser_dir + $null = New-Item -ItemType Directory $parser_s00 + $null = New-Item -ItemType Directory $parser_s01 + $null = New-Item -ItemType Directory $parser_s02 + $null = New-Item -ItemType Directory $scenarios_dir + $null = New-Item -ItemType Directory $postoverflows_dir + $null = New-Item -ItemType Directory $cscli_dir + $null = New-Item -ItemType Directory $hub_dir + $null = New-Item -ItemType Directory $config_dir\$notif_dir + $null = New-Item -ItemType Directory $base\$plugins_dir +} + +function copy_file() { + $null = Copy-Item ".\config\profiles.yaml" $config_dir + $null = Copy-Item ".\config\simulation.yaml" $config_dir + $null = Copy-Item ".\cmd\crowdsec\crowdsec.exe"
$base + $null = Copy-Item ".\cmd\crowdsec-cli\cscli.exe" $base + $null = Copy-Item -Recurse ".\config\patterns" $config_dir + $null = Copy-Item ".\config\acquis.yaml" $config_dir + $null = New-Item -ItemType File $config_dir\local_api_credentials.yaml + $null = New-Item -ItemType File $config_dir\online_api_credentials.yaml + #envsubst < "./config/dev.yaml" > $BASE/dev.yaml + Copy-Item .\config\dev.yaml $base\dev.yaml + $plugins | ForEach-Object { + Copy-Item $plugins_dir\$notif_dir\$_\notification-$_.exe $base\$plugins_dir\notification-$_.exe + Copy-Item $plugins_dir\$notif_dir\$_\$_.yaml $config_dir\$notif_dir\$_.yaml + } +} + +function setup() { + & $base\cscli.exe -c "$config_file" hub update + & $base\cscli.exe -c "$config_file" collections install crowdsecurity/linux crowdsecurity/windows +} + +function setup_api() { + & $base\cscli.exe -c "$config_file" machines add test -p testpassword -f $config_dir\local_api_credentials.yaml --force +} + +if ($help) { + show_help + exit 0; +} + +$null = New-Item -ItemType Directory $base + +$base=(Resolve-Path $base).Path +$data_dir="$base\data" +$log_dir="$base\logs\" +$config_dir="$base\config" +$config_file="$base\dev.yaml" +$cscli_dir="$config_dir\crowdsec-cli" +$parser_dir="$config_dir\parsers" +$parser_s00="$parser_dir\s00-raw" +$parser_s01="$parser_dir\s01-parse" +$parser_s02="$parser_dir\s02-enrich" +$scenarios_dir="$config_dir\scenarios" +$postoverflows_dir="$config_dir\postoverflows" +$hub_dir="$config_dir\hub" +$plugins=@("http", "slack", "splunk") +$plugins_dir="plugins" +$notif_dir="notifications" + + +Write-Output "Creating test arbo in $base" +create_arbo +Write-Output "Arbo created" +Write-Output "Copying files" +copy_file +Write-Output "Files copied" +Write-Output "Setting up configuration" +$cur_path=$pwd +Set-Location $base +setup_api +setup +Set-Location $cur_path \ No newline at end of file diff --git a/scripts/test_env.sh b/scripts/test_env.sh new file mode 100755 index 0000000..b203e7f --- /dev/null +++ b/scripts/test_env.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +BASE="./tests" + +usage() { + echo "Usage:" + echo " ./test_env.sh -h Display this help message." + echo " ./test_env.sh -d ./tests Create test environment in './tests' folder" + exit 0 +} + + +while [[ $# -gt 0 ]] +do + key="${1}" + case ${key} in + -d|--directory) + BASE=${2} + shift #past argument + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + echo "Unknown argument ${key}."
+ usage + exit 1 + ;; + esac +done + +BASE=$(realpath $BASE) + +DATA_DIR="$BASE/data" + +LOG_DIR="$BASE/logs/" + +CONFIG_DIR="$BASE/config" +CONFIG_FILE="$BASE/dev.yaml" +CSCLI_DIR="$CONFIG_DIR/crowdsec-cli" +PARSER_DIR="$CONFIG_DIR/parsers" +PARSER_S00="$PARSER_DIR/s00-raw" +PARSER_S01="$PARSER_DIR/s01-parse" +PARSER_S02="$PARSER_DIR/s02-enrich" +SCENARIOS_DIR="$CONFIG_DIR/scenarios" +POSTOVERFLOWS_DIR="$CONFIG_DIR/postoverflows" +HUB_DIR="$CONFIG_DIR/hub" +PLUGINS="http slack splunk email" +PLUGINS_DIR="plugins" +NOTIF_DIR="notifications" + +log_info() { + msg=$1 + date=$(date +%x:%X) + echo -e "[$date][INFO] $msg" +} + +create_arbo() { + mkdir -p "$BASE" + mkdir -p "$DATA_DIR" + mkdir -p "$LOG_DIR" + mkdir -p "$CONFIG_DIR" + mkdir -p "$PARSER_DIR" + mkdir -p "$PARSER_S00" + mkdir -p "$PARSER_S01" + mkdir -p "$PARSER_S02" + mkdir -p "$SCENARIOS_DIR" + mkdir -p "$POSTOVERFLOWS_DIR" + mkdir -p "$CSCLI_DIR" + mkdir -p "$HUB_DIR" + mkdir -p "$CONFIG_DIR/$NOTIF_DIR" + mkdir -p "$BASE/$PLUGINS_DIR" +} + +copy_files() { + cp "./config/profiles.yaml" "$CONFIG_DIR" + cp "./config/simulation.yaml" "$CONFIG_DIR" + cp "./cmd/crowdsec/crowdsec" "$BASE" + cp "./cmd/crowdsec-cli/cscli" "$BASE" + cp -r "./config/patterns" "$CONFIG_DIR" + cp "./config/acquis.yaml" "$CONFIG_DIR" + touch "$CONFIG_DIR"/local_api_credentials.yaml + touch "$CONFIG_DIR"/online_api_credentials.yaml + envsubst < "./config/dev.yaml" > $BASE/dev.yaml + for plugin in $PLUGINS + do + cp $PLUGINS_DIR/$NOTIF_DIR/$plugin/notification-$plugin $BASE/$PLUGINS_DIR/notification-$plugin + cp $PLUGINS_DIR/$NOTIF_DIR/$plugin/$plugin.yaml $CONFIG_DIR/$NOTIF_DIR/$plugin.yaml + done +} + + +setup() { + $BASE/cscli -c "$CONFIG_FILE" hub update + $BASE/cscli -c "$CONFIG_FILE" collections install crowdsecurity/linux +} + +setup_api() { + $BASE/cscli -c "$CONFIG_FILE" machines add test -p testpassword -f $CONFIG_DIR/local_api_credentials.yaml --force +} + + +main() { + log_info "Creating test arborescence in $BASE" + create_arbo + log_info "Arborescence created" + log_info "Copying files needed for the test environment" + copy_files + log_info "Files copied" + log_info "Setting up configurations" + CURRENT_PWD=$(pwd) + cd $BASE + setup_api + setup + cd $CURRENT_PWD + log_info "Environment is ready in $BASE" +} + + +main diff --git a/scripts/test_wizard_upgrade.sh b/scripts/test_wizard_upgrade.sh new file mode 100755 index 0000000..58039e1 --- /dev/null +++ b/scripts/test_wizard_upgrade.sh @@ -0,0 +1,359 @@ +#!
/usr/bin/env bash +# -*- coding: utf-8 -*- + +# Codes +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' +OK_STR="${GREEN}OK${NC}" +FAIL_STR="${RED}FAIL${NC}" + +CURRENT_FOLDER=$(pwd) + +BOUNCER_VERSION="v0.0.6" +RELEASE_FOLDER="" + +HUB_AVAILABLE_PARSERS="/etc/crowdsec/hub/parsers" +HUB_AVAILABLE_SCENARIOS="/etc/crowdsec/hub/scenarios" +HUB_AVAILABLE_COLLECTIONS="/etc/crowdsec/hub/collections" +HUB_AVAILABLE_PO="/etc/crowdsec/hub/postoverflows" + +HUB_ENABLED_PARSERS="/etc/crowdsec/parsers" +HUB_ENABLED_SCENARIOS="/etc/crowdsec/scenarios" +HUB_ENABLED_COLLECTIONS="/etc/crowdsec/collections" +HUB_ENABLED_PO="/etc/crowdsec/postoverflows" + +ACQUIS_FILE="/etc/crowdsec/acquis.yaml" +PROFILE_FILE="/etc/crowdsec/profiles.yaml" +CONFIG_FILE="/etc/crowdsec/config.yaml" +LOCAL_API_FILE="/etc/crowdsec/local_api_credentials.yaml" +ONLINE_API_FILE="/etc/crowdsec/online_api_credentials.yaml" +SIMULATION_FILE="/etc/crowdsec/simulation.yaml" +DB_FILE="/var/lib/crowdsec/data/crowdsec.db" + +SYSTEMD_FILE="/etc/systemd/system/crowdsec.service" + +BOUNCER_FOLDER="/etc/crowdsec/cs-firewall-bouncer" + +MUST_FAIL=0 + +function init +{ + which git > /dev/null + if [ $? -ne 0 ]; then + echo "git is needed for this test, exiting ..." + exit 1 + fi + if [[ -z ${RELEASE_FOLDER} ]]; + then + cd .. + BUILD_VERSION=${CROWDSEC_VERSION} make release + if [ $? != 0 ]; then + echo "Unable to make the release (make sure you have go installed), exiting" + exit 1 + fi + RELEASE_FOLDER="crowdsec-${CROWDSEC_VERSION}" + fi + cp -r ${RELEASE_FOLDER} ${CURRENT_FOLDER} + cd ${CURRENT_FOLDER} + + + echo "[*] Installing crowdsec (bininstall)" + cd ${RELEASE_FOLDER}/ + ./wizard.sh --bininstall + cd ${CURRENT_FOLDER} + cscli hub update + cscli collections install crowdsecurity/sshd + cscli postoverflows install crowdsecurity/cdn-whitelist + cscli machines add -a + systemctl start crowdsec + + + echo "[*] Install firewall bouncer" + wget https://github.com/crowdsecurity/cs-firewall-bouncer/releases/download/${BOUNCER_VERSION}/cs-firewall-bouncer.tgz + tar xzvf cs-firewall-bouncer.tgz + cd cs-firewall-bouncer-${BOUNCER_VERSION}/ + (echo "iptables" | sudo ./install.sh) || (echo "Unable to install cs-firewall-bouncer" && exit 1) + cd ${CURRENT_FOLDER} + + echo "[*] Tainting parser /etc/crowdsec/parsers/s01-parse/sshd-logs.yaml" + echo " # test taint parser" >> /etc/crowdsec/parsers/s01-parse/sshd-logs.yaml + + echo "[*] Tainting scenario /etc/crowdsec/scenarios/ssh-bf.yaml" + echo " # test taint scenario" >> /etc/crowdsec/scenarios/ssh-bf.yaml + + echo "[*] Tainting postoverflow /etc/crowdsec/postoverflows/s01-whitelist/cdn-whitelist.yaml" + echo " # test taint postoverflow" >> /etc/crowdsec/postoverflows/s01-whitelist/cdn-whitelist.yaml + + echo "[*] Tainting new systemd configuration file" + echo " # test taint systemd file" >> ${RELEASE_FOLDER}/config/crowdsec.service + + echo "[*] Tainting profile file" + echo " # test taint profile file" >> ${PROFILE_FILE} + + echo "[*] Tainting acquis file" + echo " # test taint acquis file" >> ${ACQUIS_FILE} + + echo "[*] Tainting local_api_creds file" + echo " # test taint local_api_creds file" >> ${LOCAL_API_FILE} + + echo "[*] Tainting online_api_creds file" + echo " # test taint online_api_creds file" >> ${ONLINE_API_FILE} + + echo "[*] Tainting config file" + echo " # test taint config file" >> ${CONFIG_FILE} + + echo "[*] Tainting simulation file" + echo " # test taint simulation file" >> ${SIMULATION_FILE} + + echo "[*] Adding a decision" + cscli decisions add -i 1.2.3.4 + + + find
${HUB_ENABLED_PARSERS} -type l -exec md5sum "{}" + >> parsers_enabled.md5 + find ${HUB_ENABLED_SCENARIOS} -type l -exec md5sum "{}" + >> scenarios_enabled.md5 + find ${HUB_ENABLED_COLLECTIONS} -type l -exec md5sum "{}" + >> collections_enabled.md5 + find ${HUB_ENABLED_PO} -type l -exec md5sum "{}" + >> po_enabled.md5 + + md5sum ${ACQUIS_FILE} >> acquis.md5 + md5sum ${PROFILE_FILE} >> profile.md5 + md5sum ${LOCAL_API_FILE} >> local_api_creds.md5 + md5sum ${ONLINE_API_FILE} >> online_api_creds.md5 + md5sum ${CONFIG_FILE} >> config.md5 + md5sum ${SIMULATION_FILE} >> simulation.md5 + md5sum ${DB_FILE} >> db.md5 + md5sum ${SYSTEMD_FILE} >> systemd.md5 + + echo "[*] Setup done" + echo "[*] Launching the upgrade" + cd ${RELEASE_FOLDER}/ + ./wizard.sh --upgrade --force + cd ${CURRENT_FOLDER} + echo "[*] Upgrade done, checking results" +} + +function down +{ + cd ${RELEASE_FOLDER}/ + ./wizard.sh --uninstall + cd ${CURRENT_FOLDER} + rm -rf crowdsec-v* + rm -rf cs-firewall-bouncer-* + rm -f crowdsec-release.tgz + rm -f cs-firewall-bouncer.tgz + rm -- *.md5 +} + +function assert_equal +{ + echo "" + if [ "$1" = "$2" ]; then + echo -e "Status - ${GREEN}OK${NC}" + else + echo -e "Status - ${RED}FAIL${NC}" + echo "Details:" + echo "" + diff <(echo "$1" ) <(echo "$2") + MUST_FAIL=1 + fi + echo "-----------------------------------------------------------------------" +} + +function assert_not_equal +{ + echo "" + if [ "$1" != "$2" ]; then + echo -e "Status - ${GREEN}OK${NC}" + else + echo -e "Status - ${RED}FAIL${NC}" + echo "Details:" + echo "" + diff <(echo "$1" ) <(echo "$2") + MUST_FAIL=1 + fi + echo "-----------------------------------------------------------------------" +} + +function assert_folder_exists +{ + echo "" + if [ -d "$1" ] + then + echo -e "Status - ${GREEN}OK${NC}" + else + echo -e "Status - ${RED}FAIL${NC}" + echo "Folder '$1' doesn't exist, but should" + MUST_FAIL=1 + fi + echo "-----------------------------------------------------------------------" +} + +function test_enabled_parsers +{ + echo $FUNCNAME + new=$(find ${HUB_ENABLED_PARSERS} -type f -exec md5sum "{}" +) + old=$(cat parsers_enabled.md5) + assert_equal "$new" "$old" + +} + +function test_enabled_scenarios +{ + echo $FUNCNAME + new=$(find ${HUB_ENABLED_SCENARIOS} -type f -exec md5sum "{}" +) + old=$(cat scenarios_enabled.md5) + assert_equal "$new" "$old" + +} + +function test_enabled_collections +{ + echo $FUNCNAME + new=$(find ${HUB_ENABLED_COLLECTIONS} -type f -exec md5sum "{}" +) + old=$(cat collections_enabled.md5) + assert_equal "$new" "$old" + +} + +function test_enabled_po +{ + echo $FUNCNAME + new=$(find ${HUB_ENABLED_PO} -type f -exec md5sum "{}" +) + old=$(cat po_enabled.md5) + assert_equal "$new" "$old" +} + +function test_config_file +{ + echo $FUNCNAME + new=$(find ${CONFIG_FILE} -type f -exec md5sum "{}" +) + old=$(cat config.md5) + assert_equal "$new" "$old" +} + +function test_acquis_file +{ + echo $FUNCNAME + new=$(find ${ACQUIS_FILE} -type f -exec md5sum "{}" +) + old=$(cat acquis.md5) + assert_equal "$new" "$old" +} + +function test_local_api_creds_file +{ + echo $FUNCNAME + new=$(find ${LOCAL_API_FILE} -type f -exec md5sum "{}" +) + old=$(cat local_api_creds.md5) + assert_equal "$new" "$old" +} + + +function test_online_api_creds_file +{ + echo $FUNCNAME + new=$(find ${ONLINE_API_FILE} -type f -exec md5sum "{}" +) + old=$(cat online_api_creds.md5) + assert_equal "$new" "$old" +} + +function test_profile_file +{ + echo $FUNCNAME + new=$(find ${PROFILE_FILE} -type f -exec md5sum "{}" +) +
old=$(cat profile.md5) + assert_equal "$new" "$old" +} + +function test_db_file +{ + echo $FUNCNAME + new=$(find ${DB_FILE} -type f -exec md5sum "{}" +) + old=$(cat db.md5) + assert_equal "$new" "$old" +} + +function test_simulation_file +{ + echo $FUNCNAME + new=$(find ${SIMULATION_FILE} -type f -exec md5sum "{}" +) + old=$(cat simulation.md5) + assert_equal "$new" "$old" +} + +function test_systemd_file +{ + echo $FUNCNAME + new=$(find ${SYSTEMD_FILE} -type f -exec md5sum "{}" +) + old=$(cat systemd.md5) + assert_not_equal "$new" "$old" +} + +function test_bouncer_dir +{ + echo $FUNCNAME + assert_folder_exists ${BOUNCER_FOLDER} +} + +function start_test +{ + echo "" + echo "-----------------------------------------------------------------------" + test_enabled_parsers + test_enabled_scenarios + test_enabled_collections + test_enabled_po + test_config_file + test_acquis_file + test_online_api_creds_file + test_local_api_creds_file + test_profile_file + test_simulation_file + test_db_file + test_systemd_file + test_bouncer_dir +} + + +usage() { + echo "Usage:" + echo "" + echo " ./test_wizard_upgrade.sh -h Display this help message." + echo " ./test_wizard_upgrade.sh Run all the testsuite. Go must be available to make the release" + echo " ./test_wizard_upgrade.sh --release If go is not installed, please provide a path to the crowdsec-vX.Y.Z release folder" + echo "" + exit 0 +} + +while [[ $# -gt 0 ]] +do + key="${1}" + case ${key} in + --version|-v) + CROWDSEC_VERSION="${2}" + shift #past argument + shift + ;; + --release|-r) + RELEASE_FOLDER="${2}" + shift #past argument + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + echo "Unknown argument ${key}." + usage + exit 1 + ;; + esac +done + + +init +start_test +down +if [ ${MUST_FAIL} -eq 1 ] +then + exit 1 +fi \ No newline at end of file diff --git a/tests/.gitignore b/tests/.gitignore new file mode 100644 index 0000000..522c09b --- /dev/null +++ b/tests/.gitignore @@ -0,0 +1,4 @@ +/local/ +/local-init/ +/.environment.sh +/dyn-bats/*.bats diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..ce8f0b1 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,426 @@ + +# What is this? + +This directory contains scripts for functional testing. The tests are run with +the [bats-core](https://github.com/bats-core/bats-core) framework, which is an +active fork of the older BATS (Bash Automated Testing System). + +With the addition of [the ansible playbooks](ansible/README.md) it is possible +to use VMs to test the binary packages, service management and other CPU +architectures. 
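+
+Before the feature-by-feature breakdown, here is what a typical first run looks like (a sketch; the pre-requisites and make targets are detailed below):
+
+```sh
+git submodule init && git submodule update   # fetch the test framework submodules
+make clean bats-all                          # build crowdsec and run the whole functional suite
+make bats-test                               # re-run the tests without rebuilding crowdsec
+```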
+ +### cscli + +| Feature | Covered | Notes | +| :-------------------- | :----------------- | :------------------------- | +| `cscli alerts` | - | | +| `cscli bouncers` | `10_bouncers` | | +| `cscli capi` | `01_base` | `status` only | +| `cscli collections` | `20_collections` | | +| `cscli config` | `01_base` | minimal testing (no crash) | +| `cscli dashboard` | - | docker inside docker 😞 | +| `cscli decisions` | `9[78]_ipv[46]*` | | +| `cscli hub` | `dyn_bats/99_hub` | | +| `cscli lapi` | `01_base` | | +| `cscli machines` | `30_machines` | | +| `cscli metrics` | - | | +| `cscli parsers` | - | | +| `cscli postoverflows` | - | | +| `cscli scenarios` | - | | +| `cscli simulation` | `50_simulation` | | +| `cscli version` | `01_base` | | + +### crowdsec + +| Feature | Covered | Notes | +| :----------------------------- | :------------- | :----------------------------------------- | +| `systemctl` start/stop/restart | - | | +| agent behavior | `40_live-ban` | minimal testing (simple ssh-bf detection) | +| forensic mode | `40_cold-logs` | minimal testing (simple ssh-bf detection) | +| starting without LAPI | `02_nolapi` | | +| starting without agent | `03_noagent` | | +| starting without CAPI | `04_nocapi` | | +| prometheus testing | - | | + +### API + +| Feature | Covered | Notes | +| :----------------- | :--------------- | :----------- | +| alerts GET/POST | `9[78]_ipv[46]*` | | +| decisions GET/POST | `9[78]_ipv[46]*` | | +| stream mode | `99_lapi-stream-mode` | | + + +# How to use it + +## pre-requisites + + - `git submodule init; git submodule update` + - `go install github.com/cloudflare/cfssl/cmd/cfssl@latest` + - `go install github.com/cloudflare/cfssl/cmd/cfssljson@latest` + - `go install github.com/mikefarah/yq/v4@latest` + - `base64` + - `bash>=4.4` + - `curl` + - `daemonize` + - `jq` + - `nc` + - `openssl` + - `openbsd-netcat` + - `python3` + +## Running all tests + +Run `make clean bats-all` to perform a test build + run. +To repeat test runs without rebuilding crowdsec, use `make bats-test`. + + +## Debugging tests + +See `./tests/run-tests --help` to run/debug specific tests. + +Example: `./tests/run-tests tests/bats/02_nolapi.bats -f "cscli config backup"` (the string is a regexp). +You need to provide a path for a test file or directory (even if it's the full 'tests/bats') to use the `-f` option. + + +# How does it work? + +In BATS, you write tests in the form of Bash functions that have unique +descriptions (the name of the test). You can do most things that you can +normally do in a shell function. If there is any error condition, the test +fails. A set of functions is provided to implement assertions, and a mechanism +of `setup`/`teardown` is provided at the level of individual tests (functions) +or group of tests (files). + +The stdout/stderr of the commands within the test function are captured by +bats-core and will only be shown if the test fails. If you want to always print +something to debug your test case, you can redirect the output to the file +descriptor 3: + +```sh +@test "mytest" { + echo "hello world!" >&3 + run some-command + assert_success + echo "goodbye." >&3 +} +``` + +If you do that, please remove it once the test development is finished, because +this practice breaks the TAP protocol (unless each line has a '#' as first +character, but really, it's better to avoid unnecessary output when tests succeed).
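+
+For instance, a permanent diagnostic line can be kept TAP-safe with the '#' prefix mentioned above; a minimal sketch:
+
+```sh
+@test "mytest with TAP-safe output" {
+  echo "# this diagnostic line does not break the TAP stream" >&3
+  run some-command
+  assert_success
+}
+```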
+ +Here you can find the documentation for the main framework and the plugins we use in this test suite: + + - [bats-core tutorial](https://bats-core.readthedocs.io/en/stable/tutorial.html) + - [Writing tests](https://bats-core.readthedocs.io/en/stable/writing-tests.html) + - [bats-assert](https://github.com/bats-core/bats-assert) + - [bats-support](https://github.com/bats-core/bats-support) + - [bats-file](https://github.com/bats-core/bats-file) + - [bats-mock](https://github.com/grayhemp/bats-mock) + +> As it often happens with open source, the first results from search engines refer to the old, unmaintained forks. +> Be sure to use the links above to find the right versions. + +Since bats-core is [TAP (Test Anything Protocol)](https://testanything.org/) +compliant, its output is in a standardized format. It can be integrated with a +separate [tap reporter](https://www.npmjs.com/package/tape#pretty-reporters) or +included in a larger test suite. The TAP specification is pretty minimalist and +some glue may be needed. + + +Other tools that you can find useful: + + - [mikefarah/yq](https://github.com/mikefarah/yq) - to parse and update YAML files on the fly + - [aliou/bats.vim](https://github.com/aliou/bats.vim) - for syntax highlighting (use bash otherwise) + +# setup and teardown + +If you have read the bats-core tutorial linked above, you are aware of the +`setup` and `teardown` functions. + +What you may have overlooked is that the script body outside the functions is +executed multiple times, so we have to be careful of what we put there. + +Here we have a look at the execution flow with two tests: + +```sh +echo "begin" >&3 + +setup_file() { + echo "setup_file" >&3 +} + +teardown_file() { + echo "teardown_file" >&3 +} + +setup() { + echo "setup" >&3 +} + +teardown() { + echo "teardown" >&3 +} + +@test "test 1" { + echo "test #1" >&3 +} + +@test "test 2" { + echo "test #2" >&3 +} + +echo "end" >&3 +``` + +The above test suite produces the following output: + +``` +begin +end +setup_file +begin +end + ✓ test 1 +setup +test #1 +teardown +begin +end + ✓ test 2 +setup +test #2 +teardown +teardown_file +``` + +See how "begin" and "end" are repeated three times each? The code outside +setup/teardown/test functions is really executed three times (more as you add +more tests). You can put there variables or function definitions, but keep it +to a minimum and [don't write anything to the standard +output](https://bats-core.readthedocs.io/en/stable/writing-tests.html#code-outside-of-test-cases). +For most things you want to use `setup_file()` instead. + +But.. there is a but. Quoting from [the FAQ](https://bats-core.readthedocs.io/en/stable/faq.html): + +> You can simply source .sh files. However, be aware that `source`ing +> files with errors outside of any function (or inside `setup_file`) will trip +> up bats and lead to hard to diagnose errors. Therefore, it is safest to only +> source inside setup or the test functions themselves. + +This doesn't mean you can't do that, just that you're on your own if there is an error. + + +# Testing crowdsec + +## Fixtures + +For the purpose of functional tests, crowdsec and its companions (cscli, plugin +notifiers, bouncers) are installed in a local environment, which means tests +should not install or touch anything outside a `./tests/local` directory. This +includes binaries, configuration files, databases, data downloaded from the +internet, logs...
The use of `/tmp` is tolerated, but BATS also provides [three +useful +variables](https://bats-core.readthedocs.io/en/stable/writing-tests.html#special-variables): +`$BATS_SUITE_TMPDIR`, `$BATS_FILE_TMPDIR` and `$BATS_TEST_TMPDIR` that let you +ensure your desired level of isolation of temporary files across the tests. + +When built with `make bats-build`, the binaries will look there by default for +their configuration and data needs. So you can run `./local/bin/cscli` from +a shell with no need for further parameters. + +To set up the installation described above we provide a couple of scripts, +`instance-data` and `instance-crowdsec`. They manage fixture and background +processes; they are meant to be used in setup/teardown in several ways, +according to the specific needs of the group of tests in the file. + + - `instance-data make` + + Creates a tar file in `./local-init/init-config-data.tar`. + The file contains all the configuration, hub and database files needed + to restore crowdsec to a known initial state. + Things like `machines add ...`, `capi register`, `hub update`, `collections + install crowdsecurity/linux` are executed here so they don't need to be + repeated for each test or group of tests. + + - `instance-data load` + + Extracts the files created by `instance-data make` for use by the local + crowdsec instance. Crowdsec must not be running while this operation is + performed. + + - `instance-crowdsec [ start | stop ]` + + Runs (or stops) crowdsec as a background process. PID and lockfiles are + written in `./local/var/run/`. + + +Here are some ways to use these two scripts. + + - case 1: load a fresh crowdsec instance + data for each test (01_base, 10_bouncers, 20_collections...) + + This offers the best isolation, but the tests run slower. More importantly, + since there is no concept of "grouping" tests in bats-core with the exception + of files, if you need to perform some setup that is common to two or more + tests, you will have to repeat the code. + + - case 2: load a fresh set of data for each test, but run crowdsec only for + the tests that need it, possibly after altering the configuration + (02_nolapi, 03_noagent, 04_nocapi, 40_live-ban) + + This is useful because you sometimes don't want crowdsec to run at all, + for example when testing `cscli` in isolation, or because you may want to tweak the + configuration inside the test function before running the lapi/agent. See + how we use `yq` to change the YAML files to that effect. + + - case 3: start crowdsec with the initial set of configuration+data once, and keep it + running for all the tests (50_simulation, 98_ipv4, 98_ipv6) + + This offers no isolation across tests, which over time could break more + often as a result, but you can rely on the test order to test more complex + scenarios with a reasonable performance and the least amount of code. + + +## status, stdout and stderr + +As we said, if any error occurs inside a test function, the test +fails immediately. You call `mycommand`, it exits with $? != 0, the test fails. + +But how to test the output, then? If we call `run mycommand`, then $? will be 0 +allowing the test to keep running. The real error status is stored in the +`$status` variable, and the command output and standard error content are put +together in the `$output` variable. By specifying `run --separate-stderr`, you +can have separated `$output` and `$stderr` variables. + +The above is better explained in the bats-core tutorial. If you have not read it +yet, now is a good time.
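+
+As a sketch of the above (`mycmd`, its flag and its output are hypothetical):
+
+```sh
+@test "status and both streams are captured" {
+  run --separate-stderr mycmd --flag   # hypothetical command under test
+  [ "$status" -eq 0 ]                  # the real exit code is stored here
+  [[ "$output" == *expected* ]]        # stdout was captured in $output
+  [[ "$stderr" == *warning* ]]         # stderr stays in its own variable
+}
+```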
+ +The `$output` variable gets special treatment with the +[bats-support](https://github.com/bats-core/bats-support) and +[bats-assert](https://github.com/bats-core/bats-assert) plugins and can be +checked with `assert_*` commands. The `$stderr` variable does not have these, +but we can use `run echo "$stderr"` and then check `$output` with asserts. + +Remember that `run` always overwrites the `$output` variable, so if you consume +it with `run jq <(output)` you can only do it once, because the second time it +will read the output of the `jq` command. But you can construct a list of all +the values you want and check them all in a single step. + +Note that `<(output)` is substituted with the file name of a file descriptor, +so `mycmd <(output)` can become `mycmd /dev/fd/23`, `mycmd /tmp//sh-np.hpc7Zs` +or `mycmd /proc/self/fd/38` depending on the platform. To have it fed to +standard input, use `< <(output)`. + +See the `lib/*.sh` and `bats/*.bats` files for other tricks we employ. + +## file operations + +We included the [bats-file](https://github.com/bats-core/bats-file) plugin to +check the result of file system operations: existence, type/size/ownership checks +on files, symlinks, directories, sockets. + +## mocking external commands + +The [bats-mock](https://github.com/grayhemp/bats-mock) plugin allows you to define +a "fake" behavior for the external commands called by a package under test, and +to record and assert which parameters are passed to it. + +## gotchas + + - pay attention to tests that are not run - for example "bats warning: Executed 143 + instead of expected 144 tests". They are especially tricky to debug. + + - using the `load` command in `teardown()` causes tests to be silently skipped or break in "funny" + ways. The other functions seem safe. + +# Testing with MySQL and Postgres + +By default, the tests are run with the embedded sqlite database engine. This should be +enough in most cases, since the database operations are abstracted via the `ent` ORM. + +You can however easily test with a different engine. + +## Postgres + +Run Postgres somewhere, version 10 or above - easy to do in a docker container. + +You also need to install a postgresql-client package or equivalent, to provide +recent pg_dump and pg_restore executables (not older than the PG version in the docker container). + +``` +$ sudo docker run --detach --name=postgres -p 5432:5432 --env="POSTGRES_PASSWORD=postgres" postgres:latest +``` + +The name of the container is not really important. +If you are not using Docker, you may need to adjust the `PGHOST`/`PGPORT`/`PGPASSWORD`/`PGUSER` variables +(defaults are 127.0.0.1, 5432, postgres, postgres). + +An additional user and database both named `crowdsec_test` will be created. + +Now you can build and run the tests (we skip bats-test-hub here, as those tests really +should not be affected by a change in DB). + +``` +$ export DB_BACKEND=postgres +$ make clean bats-build bats-fixture bats-test +``` + +or with the pgx driver: + +``` +$ export DB_BACKEND=pgx +$ make clean bats-build bats-fixture bats-test +``` + +The value of DB_BACKEND must not change between the build/fixture/test steps. + +## MySQL/MariaDB + +Same considerations as above, with the following changes: + +``` +$ sudo docker run --cap-add=sys_nice --detach --name=mysql -p 3306:3306 --env="MYSQL_ROOT_PASSWORD=password" mysql +[...]
+$ export DB_BACKEND=mysql +$ make clean bats-build bats-fixture bats-test +``` + +or for MariaDB + +``` +$ sudo docker run --cap-add=sys_nice --detach --name=mariadb -p 3306:3306 --env="MYSQL_ROOT_PASSWORD=password" mariadb +``` + +A mysql-client package is required as well. + +## troubleshooting + + - CAPI is disabled, why? +Most tests don't need it. Helper scripts are provided in `tests/enable-capi` +and `tests/disable-capi` for interactive use, and two library functions +`config_enable_capi` and `config_disable_capi` to call inside the tests. +You still need to call `cscli capi register` after enabling it. + + - My tests are hanging forever, why? +See if you have a jq/yq or similar process waiting for standard input. Hint: +you can pass a file from the result of the previous `run` command with +`<(output)`. This substitutes the expression with a file name, but if you +really want it in standard input, you have to use `< <(output)`. Bash is +awesome but the syntax is often weird. + + - I can't do X with jq. +If you prefer you can use yq. It can parse and generate json, and it has a +different syntax. + + - I get "while parsing /tmp/....: yaml: line 5: mapping values are not allowed in this context" +Check the heredocs (the <` + +- `vagrant up --no-provision; vagrant provision`. The first command creates + the VM, the second installs all the dependencies, test suite and package + under test, then runs the tests. If you run a plain `vagrant up`, it does + everything with a single command, but also destroys the VM in case of test + failure so you are left with nothing to debug. + +- `vagrant destroy` when you want to remove the VM. If you want to free up the + space taken by the base VM images, they are in + `/var/lib/libvirt/images/*VAGRANT*` + +The above steps are automated in the script `./prepare-run` (requires bash +>=4.4). It takes an environment file, and optionally a list of directories with +vagrant configurations. With a single parameter, it loops over all the +directories in alphabetical order, excluding those in the `experimental` +directory. Watch out for running VMs if you break the loop by hand. + +After this, you will find up to 30GB of base images in `/var/lib/libvirt/images`, +which you need to remove by hand when you have finished testing or leave them +around for the next time. + +You can give more memory or CPU juice to the VMs by editing [Vagrantfile.common](vagrant/Vagrantfile.common). + +## Test Matrix + +Tests fail with unsupported configurations or when the environment is not prepared correctly +due to missing setup/teardown parts in Ansible or functional tests. False positives +are also possible due to timing issues or flaky network connections. + +If you have a result that deviates from the following matrix, that's probably a genuine bug or regression. +The data was created with crowdsec v1.4.1.
+ +| | source/sqlite | pkg/sqlite | source/postgres | source/pgx | source/mysql (0) | +| ------------------------- | ------------- | ---------- | --------------- | ---------- | ---------------- | +| AmazonLinux 2 | ✓ (1) | ✓ (1) | old-db | old-db | wip | +| CentOS 7 | ✓ | ✓ | old-db | old-db | ✓ | +| CentOS 8 | ✓ | ✓ | ✓ | ✓ | ✓ | +| CentOS 9 | ✓ | ✓ | ✓ | ✓ | ✓ | +| Debian 9 (stretch) | ✓ | ✓ | old-db | old-db | wip | +| Debian 10 (buster) | ✓ | ✓ | ✓ | ✓ | ✓ | +| Debian 11 (bullseye) | ✓ | ✓ | ✓ | ✓ | ✓ | +| Debian (testing/bookworm) | ✓ | ✓ | ✓ | ✓ | wip | +| Fedora 33 | ✓ | ✓ | wip | wip | wip | +| Fedora 34 | ✓ | ✓ | ✓ | ✓ | wip | +| Fedora 35 | ✓ | ✓ | ✓ | ✓ | wip | +| Fedora 36 | ✓ | ✓ | ✓ | ✓ | wip | +| FreeBSD 12 | ✓ | wip | wip | wip | wip | +| FreeBSD 13 | ✓ | wip | wip | wip | wip | +| Oracle 7 | ✓ | ✓ | old-db | old-db | ✓ | +| Oracle 8 | ✓ | ✓ | ✓ | ✓ | ✓ | +| Ubuntu 16.04 (xenial) | ✓ | ✓ | old-db | old-db | ✓ | +| Ubuntu 18.04 (bionic) | ✓ | ✓ | ✓ | ✓ | ✓ | +| Ubuntu 20.04 (focal) | ✓ | ✓ | ✓ | ✓ | ✓ | +| Ubuntu 22.04 (jammy) | ✓ | ✓ | ✓ | ✓ | ✓ | +| | | | | | | + +Note: all tests with `local/` are expected to pass for `pkg/` as well. + +wip - missing ansible or bats parts, could be fixed in a future release + +old-db - the database that ships with the distribution is not supported +(Postgres < 10). Won't fix, feel free to install the DB from an unofficial +repository. + +0 - MySQL or MariaDB, depending on distribution defaults + +1 - ansible may hang, passes all tests if run by hand diff --git a/tests/ansible/ansible.cfg b/tests/ansible/ansible.cfg new file mode 100644 index 0000000..66f27c2 --- /dev/null +++ b/tests/ansible/ansible.cfg @@ -0,0 +1,15 @@ +[defaults] +pipelining = True +force_color = True +nocows = True + +# inventory = inventory.yml +callbacks_enabled = timer + +# more compact and readable output +stdout_callback = debug +display_skipped_hosts = False +display_ok_hosts = True + +[ssh_connection] +ssh_args = -o ControlMaster=auto -o ControlPersist=60s diff --git a/tests/ansible/env/example.sh b/tests/ansible/env/example.sh new file mode 100755 index 0000000..f86ee3e --- /dev/null +++ b/tests/ansible/env/example.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +## DB_BACKEND is required, because even if it has a sensible default (sqlite) +## all other variables can have an empty value. So if DB_BACKEND is missing you +## may have forgot to set the environment for the test run. +## One of "sqlite", "postgres", "pgx", "mysql" +DB_BACKEND=sqlite + +## Set this to test a binary package (deb, rpm..). If missing or false, +## crowdsec will be built from sources and tested an non-root without installation. +# PACKAGE_TESTING=true + +## The URL of a crowdsec repository with the test scripts. +# TEST_SUITE_GIT="https://github.com/crowdsecurity/crowdsec" + +## The branch, tag or commit of the test scripts. +# TEST_SUITE_VERSION="master" + +## The path to a crowdsec.zip file containing the crowdsec sources with test scripts. +## Overrides TEST_SUITE_GIT and TEST_SUITE_VERSION. +# TEST_SUITE_ZIP="/tmp/crowdsec.zip" + +## TEST_PACKAGE_VERSION_DEB is the version of the package under test. +## Can be different from TEST_PACKAGE_VERSION_RPM in case of stable releases (no '-1' suffix). +# TEST_PACKAGE_VERSION_DEB=1.4.1 + +## TEST_PACKAGE_VERSION_RPM is the version of the package under test. +## Can be different from TEST_PACKAGE_VERSION_DEB in case of stable releases (rpm requires a '-1' suffix). +# TEST_PACKAGE_VERSION_RPM=1.4.1-1 + +## The path to a crowdsec binary package (.deb, .rpm..). 
If both this and TEST_PACKAGE_VERSION_* are set, +## the package from TEST_PACKAGE_VERSION_* will be installed first, then replaced by the package in the +## provided file. This is a way to test upgrades. +# TEST_PACKAGE_FILE="/tmp/crowdsec.deb" + +## The path to a bundle with all the .deb and .rpm packages, split by architecture, distribution and version (see README). +# TEST_PACKAGE_DIR=/path/to/packages/1.4.1-rc1 + +## A comma-separated list of test scripts to skip. Example: "02_nolapi.bats,03_noagent.bats" +# TEST_SKIP= + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/env/pkg-sqlite.sh b/tests/ansible/env/pkg-sqlite.sh new file mode 100755 index 0000000..a161260 --- /dev/null +++ b/tests/ansible/env/pkg-sqlite.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +DB_BACKEND=sqlite +PACKAGE_TESTING=true +TEST_PACKAGE_VERSION_DEB=1.4.1 +TEST_PACKAGE_VERSION_RPM=1.4.1-1 + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/env/source-mysql.sh b/tests/ansible/env/source-mysql.sh new file mode 100755 index 0000000..599e522 --- /dev/null +++ b/tests/ansible/env/source-mysql.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +DB_BACKEND=mysql + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/env/source-pgx.sh b/tests/ansible/env/source-pgx.sh new file mode 100755 index 0000000..b23771f --- /dev/null +++ b/tests/ansible/env/source-pgx.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +DB_BACKEND=pgx + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/env/source-postgres.sh b/tests/ansible/env/source-postgres.sh new file mode 100755 index 0000000..7f76f4e --- /dev/null +++ b/tests/ansible/env/source-postgres.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +DB_BACKEND=postgres + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/env/source-sqlite.sh b/tests/ansible/env/source-sqlite.sh new file mode 100755 index 0000000..d1d585a --- /dev/null +++ b/tests/ansible/env/source-sqlite.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +DB_BACKEND=sqlite + +export DB_BACKEND +export PACKAGE_TESTING +export TEST_SUITE_GIT +export TEST_SUITE_VERSION +export TEST_SUITE_ZIP +export TEST_PACKAGE_VERSION_DEB +export TEST_PACKAGE_VERSION_RPM +export TEST_PACKAGE_FILE +export TEST_PACKAGE_DIR +export TEST_SKIP diff --git a/tests/ansible/install_binary_package.yml b/tests/ansible/install_binary_package.yml new file mode 100644 index 0000000..1c0740a --- /dev/null +++ b/tests/ansible/install_binary_package.yml @@ -0,0 +1,112 @@ +# vim: set ft=yaml.ansible: +--- + +- name: 
"Install and set up binary crowdsec package..." + hosts: all + gather_facts: true + tasks: + + - name: "Hardcode master branch for the hub, temporary override before install (config.yaml.local)" + become: true + block: + - name: "Create /etc/crowdsec" + ansible.builtin.file: + path: "/etc/crowdsec" + state: directory + mode: 0o0755 + - name: "Create /etc/crowdsec/config.yaml.local" + ansible.builtin.copy: + dest: "/etc/crowdsec/config.yaml.local" + content: "{{ config_yaml_local | to_nice_yaml }}" + mode: 0o600 + vars: + config_yaml_local: + cscli: + hub_branch: master + when: + - (package_version_deb | length > 0) or + (package_version_rpm | length > 0) or + (package_file | length > 0) or + (package_dir | length > 0) + + - name: "Install crowdsec binaries from a binary repository" + ansible.builtin.include_role: + name: crowdsecurity.testing.install_package_from_repo + when: (package_version_deb | length > 0) or + (package_version_rpm | length > 0) + + - name: "Install crowdsec binaries from a package file" + ansible.builtin.include_role: + name: crowdsecurity.testing.install_package_from_file + when: package_file | length > 0 + + - name: "Install crowdsec binaries from a package directory" + ansible.builtin.include_role: + name: crowdsecurity.testing.install_package_from_pkgdir + when: package_dir | length > 0 + + - name: "Hardcode master branch for the hub, for real this time" + become: true + block: + - name: "Read config.yaml" + ansible.builtin.slurp: + path: "/etc/crowdsec/config.yaml" + register: config_yaml + - name: "Create fact from config.yaml" + ansible.builtin.set_fact: + config_data: "{{ config_yaml['content'] | b64decode | from_yaml }}" + - name: "Patch dictionary" + ansible.builtin.set_fact: + config_data: "{{ config_data | combine(config_patch, recursive=True) }}" + vars: + config_patch: + cscli: + hub_branch: master + - name: "Write patched config.yaml" + ansible.builtin.copy: + content: '{{ config_data | to_nice_yaml }}' + dest: "/etc/crowdsec/config.yaml" + # preserve mode to be able to test permissions from package + mode: preserve + - name: "Remove config.yaml.local" + ansible.builtin.file: + path: "/etc/crowdsec/config.yaml.local" + state: absent + when: + - (package_version_deb | length > 0) or + (package_version_rpm | length > 0) or + (package_file | length > 0) or + (package_dir | length > 0) + + # this is required to avoid fatal errors in case systemctl is not working + # (which happens on some aws instances) + - name: "Override acquis.yaml for package testing" + become: true + ansible.builtin.copy: + dest: "/etc/crowdsec/acquis.yaml" + content: "{{ acquis_yaml | to_nice_yaml }}" + mode: preserve + vars: + acquis_yaml: + filenames: + - /tmp/should-not-exist.log + labels: + type: syslog + force_inotify: true + when: + - (package_version_deb | length > 0) or + (package_version_rpm | length > 0) or + (package_file | length > 0) or + (package_dir | length > 0) + + vars: + package_version_deb: >- + {{ lookup('ansible.builtin.env', 'TEST_PACKAGE_VERSION_DEB') }} + package_version_rpm: >- + {{ lookup('ansible.builtin.env', 'TEST_PACKAGE_VERSION_RPM') }} + package_file: >- + {{ lookup('ansible.builtin.env', 'TEST_PACKAGE_FILE') }} + package_dir: >- + {{ lookup('ansible.builtin.env', 'TEST_PACKAGE_DIR') }} + binary_package_name: >- + crowdsec diff --git a/tests/ansible/prepare-run b/tests/ansible/prepare-run new file mode 100755 index 0000000..45b4b5b --- /dev/null +++ b/tests/ansible/prepare-run @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# This loops over all the available 
boxes, running the test suite on each one. +# The results are collected in a file. If the file already exists, tests are not run again. + +env=$1 + +if [[ -z "${env}" ]]; then + echo "Usage: $0 [vagrant-dir]..." + exit 1 +fi + +shift + +vagrant_dirs=("$@") +if [[ $# -eq 0 ]]; then + # find all targets, with possibly weird names, don't go into subfolders (like 'experimental/') + readarray -d '' vagrant_dirs < <(find vagrant -mindepth 1 -maxdepth 1 -type d -print0 | sort -z | grep -z -v .vagrant) +fi + +#shellcheck disable=SC1090 +. "${env}" + +VAGRANT_FORCE_COLOR=true +export VAGRANT_FORCE_COLOR + +for vm in "${vagrant_dirs[@]}"; do + outfile="$(basename "${env}").out" + pushd "${vm}" >/dev/null || exit + if [[ ! -f "Vagrantfile" ]]; then + popd >/dev/null || exit + continue + fi + echo "Prepare and run tests on ${vm}..." + if [[ -x "skip" ]]; then + if ! ./skip; then + popd >/dev/null || exit + continue + fi + fi + if [[ ! -f "${outfile}" ]]; then + vagrant up --no-provision + vagrant provision 2>&1 | tee "${outfile}" + vagrant destroy -f + else + echo "skipping: ${vm}, file ${outfile} already exists." >&2 + fi + popd >/dev/null || exit +done diff --git a/tests/ansible/prepare_tests.yml b/tests/ansible/prepare_tests.yml new file mode 100644 index 0000000..0249d68 --- /dev/null +++ b/tests/ansible/prepare_tests.yml @@ -0,0 +1,21 @@ +# vim: set ft=yaml.ansible: +--- + +- name: "Prepare fixture for the functional tests" + hosts: all + gather_facts: true + vars_files: + - vars/go.yml + - vars/mysql.yml + - vars/postgres.yml + roles: + - name: make_fixture + environment: + PGHOST: 127.0.0.1 + PGPORT: 5432 + PGPASSWORD: "{{ postgresql_users[0].password }}" + PGUSER: postgres + MYSQL_HOST: localhost + MYSQL_PORT: 3306 + MYSQL_PASSWORD: "{{ mysql_root_password }}" + MYSQL_USER: "root" diff --git a/tests/ansible/provision_dependencies.yml b/tests/ansible/provision_dependencies.yml new file mode 100644 index 0000000..891bcc1 --- /dev/null +++ b/tests/ansible/provision_dependencies.yml @@ -0,0 +1,43 @@ +# vim: set ft=yaml.ansible: +--- + +- name: "Install required packages" + hosts: all + vars_files: + - vars/go.yml + roles: + - crowdsecurity.testing.apt_update + - crowdsecurity.testing.go + - crowdsecurity.testing.machine_id + - crowdsecurity.testing.epel + - crowdsecurity.testing.git + - crowdsecurity.testing.gcc + - crowdsecurity.testing.gnu_make + - crowdsecurity.testing.bats_requirements + +- name: "Install Postgres" + hosts: all + become: true + vars_files: + - vars/postgres.yml + tasks: + - name: role "geerlingguy.postgresql" + ansible.builtin.include_role: + name: geerlingguy.postgresql + ## enable this for debugging + # vars: + # postgres_users_no_log: false + when: + - lookup('ansible.builtin.env', 'DB_BACKEND') in ['pgx', 'postgres'] + +- name: "Install MySQL" + hosts: all + become: true + vars_files: + - vars/mysql.yml + tasks: + - name: role "geerlingguy.mysql" + ansible.builtin.include_role: + name: geerlingguy.mysql + when: + - lookup('ansible.builtin.env', 'DB_BACKEND') == 'mysql' diff --git a/tests/ansible/provision_test_suite.yml b/tests/ansible/provision_test_suite.yml new file mode 100644 index 0000000..fa335d0 --- /dev/null +++ b/tests/ansible/provision_test_suite.yml @@ -0,0 +1,34 @@ +# vim: set ft=yaml.ansible: +--- + +- name: "Fetch the test scripts" + hosts: all + tasks: + + - name: "Fetch the sources from a git repository" + ansible.builtin.include_role: + name: crowdsecurity.testing.download_sources_from_git + when: sources_zip | length == 0 + + - name: "Extract the sources 
from a zip archive" + ansible.builtin.include_role: + name: crowdsecurity.testing.extract_sources_from_zip + when: sources_zip | length > 0 + + - name: "Create crowdsec tests/local dir" + become: false + ansible.builtin.file: + path: "{{ ansible_env.HOME }}/crowdsec/tests/local" + state: directory + mode: 0o755 + + vars: + sources_dest_dir: "{{ ansible_env.HOME }}/crowdsec" + sources_git_repo: >- + {{ lookup('ansible.builtin.env', 'TEST_SUITE_GIT') + | default('https://github.com/crowdsecurity/crowdsec', True) }} + sources_git_version: >- + {{ lookup('ansible.builtin.env', 'TEST_SUITE_VERSION') + | default('master', True) }} + sources_zip: >- + {{ lookup('ansible.builtin.env', 'TEST_SUITE_ZIP') }} diff --git a/tests/ansible/requirements.yml b/tests/ansible/requirements.yml new file mode 100644 index 0000000..ec09364 --- /dev/null +++ b/tests/ansible/requirements.yml @@ -0,0 +1,18 @@ +# vim: set ft=yaml.ansible: +--- + +roles: + - src: geerlingguy.mysql + - src: https://github.com/crowdsecurity/ansible-role-postgresql + version: crowdsec + name: geerlingguy.postgresql + +collections: + - name: https://github.com/crowdsecurity/ansible-collection-crowdsecurity.testing.git + type: git + version: main + +# - name: crowdsecurity.testing +# source: ../../../crowdsecurity.testing +# type: dir + diff --git a/tests/ansible/roles/make_fixture/tasks/main.yml b/tests/ansible/roles/make_fixture/tasks/main.yml new file mode 100644 index 0000000..77e8611 --- /dev/null +++ b/tests/ansible/roles/make_fixture/tasks/main.yml @@ -0,0 +1,77 @@ +# vim: set ft=yaml.ansible: +--- +- name: "Set make_cmd = make (!bsd)" + ansible.builtin.set_fact: + make_cmd: make + when: + - ansible_facts.system not in ['FreeBSD', 'OpenBSD'] + +- name: "Set make_cmd = gmake (bsd)" + ansible.builtin.set_fact: + make_cmd: gmake + when: + - ansible_facts.system in ['FreeBSD', 'OpenBSD'] + +- name: "Build crowdsec from sources, prepare test environment and fixture" + become: false + block: + - name: "Make bats-build bats-fixture" + ansible.builtin.command: + cmd: "{{ make_cmd }} bats-build bats-fixture" + chdir: "{{ ansible_env.HOME }}/crowdsec" + creates: "{{ ansible_env.HOME }}/crowdsec/tests/local-init/init-config-data.tar" + environment: + DB_BACKEND: "{{ lookup('ansible.builtin.env', 'DB_BACKEND') }}" + # daemonize -> /usr/bin or /usr/local/sbin + # pidof -> /usr/sbin + # bash -> /opt/bash/bin + PATH: "/opt/bash/bin:{{ ansible_env.PATH }}:{{ golang_install_dir }}/bin/:/usr/sbin:/usr/local/sbin" + rescue: + - name: "Read crowdsec.log" + ansible.builtin.slurp: + path: "{{ ansible_env.HOME }}/crowdsec/tests/local/var/log/crowdsec.log" + register: crowdsec_log + - name: "Show crowdsec.log" + ansible.builtin.fail: + msg: "{{ crowdsec_log['content'] | b64decode }}" + when: (package_testing is not defined) or (package_testing in ['', 'false', 'False']) + +- name: "Prepare test environment and fixture for binary package" + become: true + block: + - name: "Make bats-environment bats-check-requirements bats-fixture" + ansible.builtin.command: + cmd: "{{ make_cmd }} bats-environment bats-check-requirements bats-fixture" + chdir: "{{ ansible_env.HOME }}/crowdsec" + creates: "{{ ansible_env.HOME }}/crowdsec/tests/local-init/init-config-data.tar" + environment: + PACKAGE_TESTING: "{{ package_testing }}" + DB_BACKEND: "{{ lookup('ansible.builtin.env', 'DB_BACKEND') }}" + # daemonize -> /usr/bin or /usr/local/sbin + # pidof -> /usr/sbin + # bash -> /opt/bash/bin + PATH: "/opt/bash/bin:{{ ansible_env.PATH }}:/usr/sbin:/usr/local/sbin" + rescue: + - 
name: "Read crowdsec.log" + ansible.builtin.slurp: + path: "/var/log/crowdsec.log" + register: crowdsec_log + - name: "Show crowdsec.log" + ansible.builtin.fail: + msg: "{{ crowdsec_log['content'] | b64decode }}" + when: (package_testing is defined) and (package_testing not in ['', 'false', 'False']) + +- name: "Debug - show environment" + become: false + block: + - name: "Look for .environment.sh" + ansible.builtin.slurp: + src: "{{ ansible_env.HOME }}/crowdsec/tests/.environment.sh" + changed_when: true + register: envfile + - name: "Show .environment.sh" + ansible.builtin.debug: + msg: "{{ envfile['content'] | b64decode }}" + - name: "Show environment variables" + ansible.builtin.debug: + msg: "{{ ansible_env | to_nice_yaml }}" diff --git a/tests/ansible/roles/make_fixture/vars/main.yml b/tests/ansible/roles/make_fixture/vars/main.yml new file mode 100644 index 0000000..f61e6e8 --- /dev/null +++ b/tests/ansible/roles/make_fixture/vars/main.yml @@ -0,0 +1,3 @@ +# vim: set ft=yaml.ansible: +--- +package_testing: "{{ lookup('ansible.builtin.env', 'PACKAGE_TESTING') }}" diff --git a/tests/ansible/roles/run_func_tests/tasks/main.yml b/tests/ansible/roles/run_func_tests/tasks/main.yml new file mode 100644 index 0000000..741292f --- /dev/null +++ b/tests/ansible/roles/run_func_tests/tasks/main.yml @@ -0,0 +1,104 @@ +# vim: set ft=yaml.ansible: +--- +- name: "Tweak systemd configuration for tests" + become: true + block: + - name: "Create /lib/systemd/system/crowdsec.service.d" + ansible.builtin.file: + owner: root + group: root + mode: 0o755 + path: /lib/systemd/system/crowdsec.service.d + state: directory + - name: "Override StartLimitBurst" + ansible.builtin.ini_file: + dest: /lib/systemd/system/crowdsec.service.d/startlimitburst.conf + owner: root + group: root + mode: 0o644 + section: Service + option: StartLimitBurst + value: 100 + - name: "Systemctl daemon-reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - (package_testing is defined) and (package_testing not in ['', 'false', 'False']) + - ansible_facts.os_family in ["RedHat", "Debian"] + +- name: "Debug - show environment.sh" + become: false + block: + - name: "Look for .environment.sh" + ansible.builtin.slurp: + src: "{{ ansible_env.HOME }}/crowdsec/tests/.environment.sh" + changed_when: true + register: envfile + - name: "Show .environment.sh" + ansible.builtin.debug: + msg: "{{ envfile['content'] | b64decode }}" + +- name: "Search for test scripts" + become: false + ansible.builtin.find: + paths: "{{ ansible_env.HOME }}/crowdsec/tests/bats" + pattern: "*.bats" + register: testfiles + +- name: "Run BATS tests for source build" + become: false + block: + - name: "Run test scripts" + ansible.builtin.command: + cmd: tests/run-tests {{ item.path }} + chdir: "{{ ansible_env.HOME }}/crowdsec" + with_items: "{{ testfiles.files | sort(attribute='path') }}" + loop_control: + label: "{{ item['path'] }}" + environment: + # daemonize -> /usr/bin or /usr/local/sbin + # pidof -> /usr/sbin + # bash -> /opt/bash/bin + PATH: "/opt/bash/bin:{{ ansible_env.PATH }}:/usr/sbin:/usr/local/sbin" + changed_when: true + when: + - (item.path | basename) not in skip_tests.split(',') + rescue: + - name: "Read crowdsec.log" + ansible.builtin.slurp: + path: "{{ ansible_env.HOME }}/crowdsec/tests/local/var/log/crowdsec.log" + register: crowdsec_log + - name: "Show crowdsec.log" + ansible.builtin.fail: + msg: "{{ crowdsec_log['content'] | b64decode }}" + when: + - (package_testing is not defined) or (package_testing in ['', 'false', 'False']) + +- 
name: "Run BATS tests for binary package" + become: true + block: + - name: "Run test scripts" + ansible.builtin.command: + cmd: tests/run-tests {{ item.path }} + chdir: "{{ ansible_env.HOME }}/crowdsec" + with_items: "{{ testfiles.files | sort(attribute='path') }}" + loop_control: + label: "{{ item['path'] }}" + environment: + # daemonize -> /usr/bin or /usr/local/sbin + # pidof -> /usr/sbin + # bash -> /opt/bash/bin + PATH: "/opt/bash/bin:{{ ansible_env.PATH }}:/usr/sbin:/usr/local/sbin" + changed_when: true + when: + - (item.path | basename) not in skip_tests.split(',') + rescue: + - name: "Read crowdsec.log" + ansible.builtin.slurp: + path: "/var/log/crowdsec.log" + register: crowdsec_log + - name: "Show crowdsec.log" + ansible.builtin.fail: + msg: "{{ crowdsec_log['content'] | b64decode }}" + when: + - (package_testing is defined) and (package_testing not in ['', 'false', 'False']) diff --git a/tests/ansible/roles/run_func_tests/vars/main.yml b/tests/ansible/roles/run_func_tests/vars/main.yml new file mode 100644 index 0000000..43da543 --- /dev/null +++ b/tests/ansible/roles/run_func_tests/vars/main.yml @@ -0,0 +1,4 @@ +# vim: set ft=yaml.ansible: +--- +package_testing: "{{ lookup('ansible.builtin.env', 'PACKAGE_TESTING') }}" +skip_tests: "{{ lookup('ansible.builtin.env', 'TEST_SKIP') }}" diff --git a/tests/ansible/run_all.yml b/tests/ansible/run_all.yml new file mode 100644 index 0000000..7a25c78 --- /dev/null +++ b/tests/ansible/run_all.yml @@ -0,0 +1,8 @@ +# vim: set ft=yaml.ansible: +--- + +- import_playbook: provision_dependencies.yml +- import_playbook: provision_test_suite.yml +- import_playbook: install_binary_package.yml +- import_playbook: prepare_tests.yml +- import_playbook: run_tests.yml diff --git a/tests/ansible/run_tests.yml b/tests/ansible/run_tests.yml new file mode 100644 index 0000000..7549e02 --- /dev/null +++ b/tests/ansible/run_tests.yml @@ -0,0 +1,20 @@ +# vim: set ft=yaml.ansible: +--- + +- name: "Run functional tests" + hosts: all + gather_facts: true + vars_files: + - vars/mysql.yml + - vars/postgres.yml + roles: + - name: run_func_tests + environment: + PGHOST: 127.0.0.1 + PGPORT: 5432 + PGPASSWORD: "{{ postgresql_users[0].password }}" + PGUSER: postgres + MYSQL_HOST: localhost + MYSQL_PORT: 3306 + MYSQL_PASSWORD: "{{ mysql_root_password }}" + MYSQL_USER: "root" diff --git a/tests/ansible/vagrant/alma-8/Vagrantfile b/tests/ansible/vagrant/alma-8/Vagrantfile new file mode 100644 index 0000000..4b42adb --- /dev/null +++ b/tests/ansible/vagrant/alma-8/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/alma8' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/alma-9/Vagrantfile b/tests/ansible/vagrant/alma-9/Vagrantfile new file mode 100644 index 0000000..0ac3e5f --- /dev/null +++ b/tests/ansible/vagrant/alma-9/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/alma9' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/centos-7/Vagrantfile b/tests/ansible/vagrant/centos-7/Vagrantfile new file mode 100644 index 0000000..d7ac021 --- /dev/null +++ b/tests/ansible/vagrant/centos-7/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'centos/7' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/centos-7/skip 
b/tests/ansible/vagrant/centos-7/skip new file mode 100755 index 0000000..706a60b --- /dev/null +++ b/tests/ansible/vagrant/centos-7/skip @@ -0,0 +1,11 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +# postgres is too old on this distribution +[ "${DB_BACKEND}" = "postgres" ] && die "skipping: postgres too old" +[ "${DB_BACKEND}" = "pgx" ] && die "skipping: postgres too old" +exit 0 diff --git a/tests/ansible/vagrant/centos-8/Vagrantfile b/tests/ansible/vagrant/centos-8/Vagrantfile new file mode 100644 index 0000000..24c37ad --- /dev/null +++ b/tests/ansible/vagrant/centos-8/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'centos/stream8' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/centos-9/Vagrantfile b/tests/ansible/vagrant/centos-9/Vagrantfile new file mode 100644 index 0000000..412354f --- /dev/null +++ b/tests/ansible/vagrant/centos-9/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/centos9s' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/common b/tests/ansible/vagrant/common new file mode 100644 index 0000000..adafa08 --- /dev/null +++ b/tests/ansible/vagrant/common @@ -0,0 +1,46 @@ +# vim: set ft=ruby: +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.define 'crowdsec' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + # ansible.verbose = 'vvvv' + ansible.config_file = '../../ansible.cfg' + ansible.playbook = '../../run_all.yml' + end + + # same as above, to run the steps separately + + # config.vm.provision 'ansible' do |provdep| + # provdep.config_file = '../../ansible.cfg' + # provdep.playbook = '../../provision_dependencies.yml' + # end + + # config.vm.provision 'ansible' do |provtest| + # provtest.config_file = '../../ansible.cfg' + # provtest.playbook = '../../provision_test_suite.yml' + # end + + # config.vm.provision 'ansible' do |preptest| + # preptest.config_file = '../../ansible.cfg' + # preptest.playbook = '../../install_binary_package.yml' + # end + + # config.vm.provision 'ansible' do |preptest| + # preptest.config_file = '../../ansible.cfg' + # preptest.playbook = '../../prepare_tests.yml' + # end + + # config.vm.provision 'ansible' do |runtests| + # runtests.config_file = '../../ansible.cfg' + # runtests.playbook = '../../run_tests.yml' + # end +end diff --git a/tests/ansible/vagrant/debian-10-buster/Vagrantfile b/tests/ansible/vagrant/debian-10-buster/Vagrantfile new file mode 100644 index 0000000..2b1a4e2 --- /dev/null +++ b/tests/ansible/vagrant/debian-10-buster/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'debian/buster64' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/debian-11-bullseye/Vagrantfile b/tests/ansible/vagrant/debian-11-bullseye/Vagrantfile new file mode 100644 index 0000000..9166427 --- /dev/null +++ b/tests/ansible/vagrant/debian-11-bullseye/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'debian/bullseye64' +end + +common = '../common' +load common if File.exist?(common) diff --git 
a/tests/ansible/vagrant/debian-9-stretch/Vagrantfile b/tests/ansible/vagrant/debian-9-stretch/Vagrantfile new file mode 100644 index 0000000..4c4e39c --- /dev/null +++ b/tests/ansible/vagrant/debian-9-stretch/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'debian/stretch64' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/debian-9-stretch/skip b/tests/ansible/vagrant/debian-9-stretch/skip new file mode 100755 index 0000000..706a60b --- /dev/null +++ b/tests/ansible/vagrant/debian-9-stretch/skip @@ -0,0 +1,11 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +# postgres is too old on this distribution +[ "${DB_BACKEND}" = "postgres" ] && die "skipping: postgres too old" +[ "${DB_BACKEND}" = "pgx" ] && die "skipping: postgres too old" +exit 0 diff --git a/tests/ansible/vagrant/debian-testing/Vagrantfile b/tests/ansible/vagrant/debian-testing/Vagrantfile new file mode 100644 index 0000000..5e3b68e --- /dev/null +++ b/tests/ansible/vagrant/debian-testing/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'debian/testing64' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/experimental/alpine-3.16/Vagrantfile b/tests/ansible/vagrant/experimental/alpine-3.16/Vagrantfile new file mode 100644 index 0000000..aae7727 --- /dev/null +++ b/tests/ansible/vagrant/experimental/alpine-3.16/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/alpine316' + config.vm.define 'crowdsec' + + config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/alpine-3.16/bootstrap b/tests/ansible/vagrant/experimental/alpine-3.16/bootstrap new file mode 100755 index 0000000..7fb806b --- /dev/null +++ b/tests/ansible/vagrant/experimental/alpine-3.16/bootstrap @@ -0,0 +1,7 @@ +#!/bin/sh +unset IFS +set -euf + +# coreutils -> for timeout (busybox is not enough) +sudo apk add python3 go tar procps netcat-openbsd coreutils + diff --git a/tests/ansible/vagrant/experimental/alpine-3.16/skip b/tests/ansible/vagrant/experimental/alpine-3.16/skip new file mode 100755 index 0000000..18d4c60 --- /dev/null +++ b/tests/ansible/vagrant/experimental/alpine-3.16/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +exit 0 diff --git a/tests/ansible/vagrant/experimental/amazon-linux-2/Vagrantfile b/tests/ansible/vagrant/experimental/amazon-linux-2/Vagrantfile new file mode 100644 index 0000000..76f6e19 --- /dev/null +++ b/tests/ansible/vagrant/experimental/amazon-linux-2/Vagrantfile @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'cloudnatives/amazon-linux-2' + config.vm.define 'crowdsec' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + 
ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/amazon-linux-2/issues.txt b/tests/ansible/vagrant/experimental/amazon-linux-2/issues.txt new file mode 100644 index 0000000..efb2117 --- /dev/null +++ b/tests/ansible/vagrant/experimental/amazon-linux-2/issues.txt @@ -0,0 +1,3 @@ + +The file 70_http_plugin.bats hangs forever when run from ansible on amzn2, but all tests pass when run from ssh. + diff --git a/tests/ansible/vagrant/experimental/arch/Vagrantfile b/tests/ansible/vagrant/experimental/arch/Vagrantfile new file mode 100644 index 0000000..7820f66 --- /dev/null +++ b/tests/ansible/vagrant/experimental/arch/Vagrantfile @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/arch' + config.vm.define 'crowdsec' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/devuan-3/Vagrantfile b/tests/ansible/vagrant/experimental/devuan-3/Vagrantfile new file mode 100644 index 0000000..b1a621c --- /dev/null +++ b/tests/ansible/vagrant/experimental/devuan-3/Vagrantfile @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/devuan3' + config.vm.define 'crowdsec' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/devuan-3/skip b/tests/ansible/vagrant/experimental/devuan-3/skip new file mode 100755 index 0000000..18d4c60 --- /dev/null +++ b/tests/ansible/vagrant/experimental/devuan-3/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +exit 0 diff --git a/tests/ansible/vagrant/experimental/dragonflybsd-6/Vagrantfile b/tests/ansible/vagrant/experimental/dragonflybsd-6/Vagrantfile new file mode 100644 index 0000000..63019b6 --- /dev/null +++ b/tests/ansible/vagrant/experimental/dragonflybsd-6/Vagrantfile @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/dragonflybsd6' + config.vm.define 'crowdsec' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/gentoo/Vagrantfile b/tests/ansible/vagrant/experimental/gentoo/Vagrantfile new file mode 100644 index 0000000..e4664ae --- /dev/null +++ b/tests/ansible/vagrant/experimental/gentoo/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/gentoo' + config.vm.define 'crowdsec' + + config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + 
config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/gentoo/bootstrap b/tests/ansible/vagrant/experimental/gentoo/bootstrap new file mode 100755 index 0000000..513af50 --- /dev/null +++ b/tests/ansible/vagrant/experimental/gentoo/bootstrap @@ -0,0 +1,3 @@ +#!/bin/sh + +sudo emerge --quiet app-portage/gentoolkit dev-vcs/git net-misc/curl app-misc/jq net-analyzer/openbsd-netcat diff --git a/tests/ansible/vagrant/experimental/hardenedbsd-13/Vagrantfile b/tests/ansible/vagrant/experimental/hardenedbsd-13/Vagrantfile new file mode 100644 index 0000000..0d34ea1 --- /dev/null +++ b/tests/ansible/vagrant/experimental/hardenedbsd-13/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/hardenedbsd13' + config.vm.define 'crowdsec' + + config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/hardenedbsd-13/bootstrap b/tests/ansible/vagrant/experimental/hardenedbsd-13/bootstrap new file mode 100755 index 0000000..370b1b6 --- /dev/null +++ b/tests/ansible/vagrant/experimental/hardenedbsd-13/bootstrap @@ -0,0 +1,5 @@ +#!/bin/sh +unset IFS +set -euf + +sudo pkg install python3 diff --git a/tests/ansible/vagrant/experimental/hardenedbsd-13/skip b/tests/ansible/vagrant/experimental/hardenedbsd-13/skip new file mode 100755 index 0000000..18d4c60 --- /dev/null +++ b/tests/ansible/vagrant/experimental/hardenedbsd-13/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +exit 0 diff --git a/tests/ansible/vagrant/experimental/netbsd-9/Vagrantfile b/tests/ansible/vagrant/experimental/netbsd-9/Vagrantfile new file mode 100644 index 0000000..36c9deb --- /dev/null +++ b/tests/ansible/vagrant/experimental/netbsd-9/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/netbsd9' + config.vm.define 'crowdsec' + + # config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/openbsd-7/Vagrantfile b/tests/ansible/vagrant/experimental/openbsd-7/Vagrantfile new file mode 100644 index 0000000..d7f9801 --- /dev/null +++ b/tests/ansible/vagrant/experimental/openbsd-7/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/openbsd7' + config.vm.define 'crowdsec' + + config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff 
--git a/tests/ansible/vagrant/experimental/openbsd-7/bootstrap b/tests/ansible/vagrant/experimental/openbsd-7/bootstrap new file mode 100755 index 0000000..3b2480d --- /dev/null +++ b/tests/ansible/vagrant/experimental/openbsd-7/bootstrap @@ -0,0 +1,6 @@ +#!/bin/sh +unset IFS +set -euf + +sudo pkg_add -u +sudo pkg_add python-3.9.13 py3-pip gcc-11.2.0p2 openssl-3.0.3p0 gtar-1.34 truncate-5.2.1 diff --git a/tests/ansible/vagrant/experimental/openbsd-7/skip b/tests/ansible/vagrant/experimental/openbsd-7/skip new file mode 100755 index 0000000..18d4c60 --- /dev/null +++ b/tests/ansible/vagrant/experimental/openbsd-7/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +exit 0 diff --git a/tests/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile b/tests/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile new file mode 100644 index 0000000..4a3ec30 --- /dev/null +++ b/tests/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'opensuse/Leap-15.4.x86_64' + config.vm.define 'crowdsec' + + config.vm.provision 'shell', path: 'bootstrap' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/experimental/ubuntu-14.04-trusty/Vagrantfile b/tests/ansible/vagrant/experimental/ubuntu-14.04-trusty/Vagrantfile new file mode 100644 index 0000000..bcb67b1 --- /dev/null +++ b/tests/ansible/vagrant/experimental/ubuntu-14.04-trusty/Vagrantfile @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'peru/ubuntu-14.04-server-amd64' + config.vm.define 'crowdsec' + + config.vm.box_version = '20190901.01' + + config.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 1536 + end + + config.vm.synced_folder '.', '/vagrant', disabled: true + + config.vm.provision 'ansible' do |ansible| + ansible.config_file = '../../../ansible.cfg' + ansible.playbook = '../../../run_all.yml' + end +end diff --git a/tests/ansible/vagrant/fedora-33/Vagrantfile b/tests/ansible/vagrant/fedora-33/Vagrantfile new file mode 100644 index 0000000..49b5ee9 --- /dev/null +++ b/tests/ansible/vagrant/fedora-33/Vagrantfile @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + # config.vm.box = "fedora/33-cloud-base" + config.vm.box = 'generic/fedora33' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/fedora-33/skip b/tests/ansible/vagrant/fedora-33/skip new file mode 100755 index 0000000..4f1a906 --- /dev/null +++ b/tests/ansible/vagrant/fedora-33/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/fedora-34/Vagrantfile b/tests/ansible/vagrant/fedora-34/Vagrantfile new file mode 100644 index 0000000..1d172c9 --- /dev/null +++ b/tests/ansible/vagrant/fedora-34/Vagrantfile @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + # config.vm.box = "fedora/34-cloud-base" + config.vm.box = 'generic/fedora34' +end + +common = 
'../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/fedora-34/skip b/tests/ansible/vagrant/fedora-34/skip new file mode 100755 index 0000000..4f1a906 --- /dev/null +++ b/tests/ansible/vagrant/fedora-34/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/fedora-35/Vagrantfile b/tests/ansible/vagrant/fedora-35/Vagrantfile new file mode 100644 index 0000000..f117307 --- /dev/null +++ b/tests/ansible/vagrant/fedora-35/Vagrantfile @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + # config.vm.box = 'fedora/35-cloud-base' + config.vm.box = 'generic/fedora35' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/fedora-35/skip b/tests/ansible/vagrant/fedora-35/skip new file mode 100755 index 0000000..4f1a906 --- /dev/null +++ b/tests/ansible/vagrant/fedora-35/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/fedora-36/Vagrantfile b/tests/ansible/vagrant/fedora-36/Vagrantfile new file mode 100644 index 0000000..ef80f51 --- /dev/null +++ b/tests/ansible/vagrant/fedora-36/Vagrantfile @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + # config.vm.box = "fedora/36-cloud-base" + config.vm.box = 'generic/fedora36' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/fedora-36/skip b/tests/ansible/vagrant/fedora-36/skip new file mode 100755 index 0000000..4f1a906 --- /dev/null +++ b/tests/ansible/vagrant/fedora-36/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/freebsd-12/Vagrantfile b/tests/ansible/vagrant/freebsd-12/Vagrantfile new file mode 100644 index 0000000..33e6b47 --- /dev/null +++ b/tests/ansible/vagrant/freebsd-12/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/freebsd12' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/freebsd-12/skip b/tests/ansible/vagrant/freebsd-12/skip new file mode 100755 index 0000000..a72dde8 --- /dev/null +++ b/tests/ansible/vagrant/freebsd-12/skip @@ -0,0 +1,12 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +[ "${DB_BACKEND}" = "postgres" ] && die "postgres role does not support this distribution" +[ "${DB_BACKEND}" = "pgx" ] && die "postgres role does not support this distribution" +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/freebsd-13/Vagrantfile b/tests/ansible/vagrant/freebsd-13/Vagrantfile new file mode 100644 index 0000000..851c042 --- /dev/null +++ b/tests/ansible/vagrant/freebsd-13/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/freebsd13' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/freebsd-13/skip b/tests/ansible/vagrant/freebsd-13/skip new file mode 100755 index 0000000..a72dde8 --- 
/dev/null +++ b/tests/ansible/vagrant/freebsd-13/skip @@ -0,0 +1,12 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${PACKAGE_TESTING}" = "true" ] && die "no package available for this distribution" +[ "${DB_BACKEND}" = "postgres" ] && die "postgres role does not support this distribution" +[ "${DB_BACKEND}" = "pgx" ] && die "postgres role does not support this distribution" +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/tests/ansible/vagrant/oracle-7/Vagrantfile b/tests/ansible/vagrant/oracle-7/Vagrantfile new file mode 100644 index 0000000..638a612 --- /dev/null +++ b/tests/ansible/vagrant/oracle-7/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/oracle7' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/oracle-7/skip b/tests/ansible/vagrant/oracle-7/skip new file mode 100755 index 0000000..706a60b --- /dev/null +++ b/tests/ansible/vagrant/oracle-7/skip @@ -0,0 +1,11 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +# postgres is too old on this distribution +[ "${DB_BACKEND}" = "postgres" ] && die "skipping: postgres too old" +[ "${DB_BACKEND}" = "pgx" ] && die "skipping: postgres too old" +exit 0 diff --git a/tests/ansible/vagrant/oracle-8/Vagrantfile b/tests/ansible/vagrant/oracle-8/Vagrantfile new file mode 100644 index 0000000..425ad5e --- /dev/null +++ b/tests/ansible/vagrant/oracle-8/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/oracle8' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/oracle-9/Vagrantfile b/tests/ansible/vagrant/oracle-9/Vagrantfile new file mode 100644 index 0000000..d4e3f61 --- /dev/null +++ b/tests/ansible/vagrant/oracle-9/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/oracle9' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/rocky-8/Vagrantfile b/tests/ansible/vagrant/rocky-8/Vagrantfile new file mode 100644 index 0000000..c7315cc --- /dev/null +++ b/tests/ansible/vagrant/rocky-8/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/rocky8' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/rocky-9/Vagrantfile b/tests/ansible/vagrant/rocky-9/Vagrantfile new file mode 100644 index 0000000..0adb3ab --- /dev/null +++ b/tests/ansible/vagrant/rocky-9/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/rocky9' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/ubuntu-16.04-xenial/Vagrantfile b/tests/ansible/vagrant/ubuntu-16.04-xenial/Vagrantfile new file mode 100644 index 0000000..86646ee --- /dev/null +++ b/tests/ansible/vagrant/ubuntu-16.04-xenial/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/ubuntu1604' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/ubuntu-16.04-xenial/skip b/tests/ansible/vagrant/ubuntu-16.04-xenial/skip new file mode 100755 index 0000000..706a60b --- /dev/null +++ b/tests/ansible/vagrant/ubuntu-16.04-xenial/skip @@ -0,0 +1,11 @@ 
+#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +# postgres is too old on this distribution +[ "${DB_BACKEND}" = "postgres" ] && die "skipping: postgres too old" +[ "${DB_BACKEND}" = "pgx" ] && die "skipping: postgres too old" +exit 0 diff --git a/tests/ansible/vagrant/ubuntu-18.04-bionic/Vagrantfile b/tests/ansible/vagrant/ubuntu-18.04-bionic/Vagrantfile new file mode 100644 index 0000000..70a7780 --- /dev/null +++ b/tests/ansible/vagrant/ubuntu-18.04-bionic/Vagrantfile @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + # the official boxes only supports virtualbox + config.vm.box = 'generic/ubuntu1804' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/ubuntu-20.04-focal/Vagrantfile b/tests/ansible/vagrant/ubuntu-20.04-focal/Vagrantfile new file mode 100644 index 0000000..0006ae9 --- /dev/null +++ b/tests/ansible/vagrant/ubuntu-20.04-focal/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/ubuntu2004' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vagrant/ubuntu-22.04-jammy/Vagrantfile b/tests/ansible/vagrant/ubuntu-22.04-jammy/Vagrantfile new file mode 100644 index 0000000..c0ccee5 --- /dev/null +++ b/tests/ansible/vagrant/ubuntu-22.04-jammy/Vagrantfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/ubuntu2204' +end + +common = '../common' +load common if File.exist?(common) diff --git a/tests/ansible/vars/go.yml b/tests/ansible/vars/go.yml new file mode 100644 index 0000000..de11ec1 --- /dev/null +++ b/tests/ansible/vars/go.yml @@ -0,0 +1,5 @@ +# vim: set ft=yaml.ansible: +--- + +golang_version: "1.19.1" +golang_install_dir: "/opt/go/{{ golang_version }}" diff --git a/tests/ansible/vars/mysql.yml b/tests/ansible/vars/mysql.yml new file mode 100644 index 0000000..cf73593 --- /dev/null +++ b/tests/ansible/vars/mysql.yml @@ -0,0 +1,5 @@ +# vim: set ft=yaml.ansible: +--- + +# The password is insecure since the db is ephemeral and only listen to localhost. +mysql_root_password: password diff --git a/tests/ansible/vars/postgres.yml b/tests/ansible/vars/postgres.yml new file mode 100644 index 0000000..5c01f21 --- /dev/null +++ b/tests/ansible/vars/postgres.yml @@ -0,0 +1,30 @@ +# vim: set ft=yaml.ansible: +--- + +# The password is insecure since the db is ephemeral and only listen to localhost. +postgresql_users: + - name: postgres + password: postgres + +postgresql_hba_entries: + - type: local + database: all + user: postgres + auth_method: peer + + - type: local + database: all + user: all + auth_method: peer + + - type: host + database: all + user: all + address: "127.0.0.1/32" + auth_method: md5 + + - type: host + database: all + user: all + address: "::1/128" + auth_method: md5 diff --git a/tests/bats.mk b/tests/bats.mk new file mode 100644 index 0000000..b39d952 --- /dev/null +++ b/tests/bats.mk @@ -0,0 +1,118 @@ + +# contains scripts, bats submodules, local instances and functional test suite +TEST_DIR = $(CURDIR)/tests + +ifdef PACKAGE_TESTING + # define PACKAGE_TESTING to test the executables already installed with + # *.deb, *.rpm... 
+ LOCAL_DIR = / + BIN_DIR = /usr/bin + INIT_BACKEND = systemd + CONFIG_BACKEND = global +else + # LOCAL_DIR will contain contains a local instance of crowdsec, complete with + # configuration and data + LOCAL_DIR = $(TEST_DIR)/local + BIN_DIR = $(LOCAL_DIR)/bin + INIT_BACKEND = daemon + CONFIG_BACKEND = local + PACKAGE_TESTING = +endif + +CONFIG_DIR = $(LOCAL_DIR)/etc/crowdsec +DATA_DIR = $(LOCAL_DIR)/var/lib/crowdsec/data +LOCAL_INIT_DIR = $(TEST_DIR)/local-init +LOG_DIR = $(LOCAL_DIR)/var/log +PID_DIR = $(LOCAL_DIR)/var/run +PLUGIN_DIR = $(LOCAL_DIR)/lib/crowdsec/plugins +DB_BACKEND ?= sqlite + +ifdef TEST_COVERAGE + CROWDSEC = $(TEST_DIR)/bin/crowdsec-wrapper + CSCLI = $(TEST_DIR)/bin/cscli-wrapper + BINCOVER_TESTING = true +else + # the wrappers should work here too - it detects TEST_COVERAGE - but we allow + # overriding the path to the binaries + CROWDSEC ?= $(BIN_DIR)/crowdsec + CSCLI ?= $(BIN_DIR)/cscli + # any value is considered true + BINCOVER_TESTING = +endif + +# If you change the name of the crowdsec executable, make sure the pgrep +# parameters are correct in $(TEST_DIR)/assert-crowdsec-not-running + +define ENV := +export TEST_DIR="$(TEST_DIR)" +export LOCAL_DIR="$(LOCAL_DIR)" +export BIN_DIR="$(BIN_DIR)" +export CROWDSEC="$(CROWDSEC)" +export CSCLI="$(CSCLI)" +export CONFIG_YAML="$(CONFIG_DIR)/config.yaml" +export LOCAL_INIT_DIR="$(LOCAL_INIT_DIR)" +export LOG_DIR="$(LOG_DIR)" +export PID_DIR="$(PID_DIR)" +export PLUGIN_DIR="$(PLUGIN_DIR)" +export DB_BACKEND="$(DB_BACKEND)" +export INIT_BACKEND="$(INIT_BACKEND)" +export CONFIG_BACKEND="$(CONFIG_BACKEND)" +export PACKAGE_TESTING="$(PACKAGE_TESTING)" +export TEST_COVERAGE="$(TEST_COVERAGE)" +endef + +bats-all: bats-clean bats-build bats-fixture bats-test bats-test-hub + +# Source this to run the scripts outside of the Makefile +# Old versions of make don't have $(file) directive +bats-environment: export ENV:=$(ENV) +bats-environment: + @echo "$${ENV}" > $(TEST_DIR)/.environment.sh + +# Verify dependencies and submodules +bats-check-requirements: + @$(TEST_DIR)/bin/check-requirements + +# Build and installs crowdsec in a local directory. Rebuilds if already exists. 
+bats-build: bats-environment bats-check-requirements + @mkdir -p $(BIN_DIR) $(LOG_DIR) $(PID_DIR) $(PLUGIN_DIR) + @BINCOVER_TESTING=$(BINCOVER_TESTING) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) $(MAKE) goversion crowdsec cscli plugins + @install -m 0755 cmd/crowdsec/crowdsec cmd/crowdsec-cli/cscli $(BIN_DIR)/ + @install -m 0755 plugins/notifications/*/notification-* $(PLUGIN_DIR)/ + @BINCOVER_TESTING=$(BINCOVER_TESTING) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) $(MAKE) goversion crowdsec-bincover cscli-bincover + @install -m 0755 cmd/crowdsec/crowdsec.cover cmd/crowdsec-cli/cscli.cover $(BIN_DIR)/ + +# Create a reusable package with initial configuration + data +bats-fixture: + @$(TEST_DIR)/instance-data make + +# Remove the local crowdsec installation and the fixture config + data +# Don't remove LOCAL_DIR directly because it could be / or anything else outside the repo +bats-clean: + @$(RM) $(TEST_DIR)/local $(WIN_IGNORE_ERR) + @$(RM) $(LOCAL_INIT_DIR) $(WIN_IGNORE_ERR) + @$(RM) $(TEST_DIR)/dyn-bats/*.bats $(WIN_IGNORE_ERR) + @$(RM) tests/.environment.sh $(WIN_IGNORE_ERR) + +# Run the test suite +bats-test: bats-environment bats-check-requirements + $(TEST_DIR)/run-tests $(TEST_DIR)/bats + +# Generate dynamic tests +bats-test-hub: bats-environment bats-check-requirements + @$(TEST_DIR)/bin/generate-hub-tests + $(TEST_DIR)/run-tests $(TEST_DIR)/dyn-bats + +# Static checks for the test scripts. +# Not failproof but they can catch bugs and improve learning of sh/bash +bats-lint: + @shellcheck --version >/dev/null 2>&1 || (echo "ERROR: shellcheck is required."; exit 1) + @shellcheck -x $(TEST_DIR)/bats/*.bats + + +bats-test-package: bats-environment + $(TEST_DIR)/instance-data make + $(TEST_DIR)/run-tests $(TEST_DIR)/bats + $(TEST_DIR)/run-tests $(TEST_DIR)/dyn-bats + +.PHONY: bats-environment diff --git a/tests/bats/01_base.bats b/tests/bats/01_base.bats new file mode 100644 index 0000000..bdcbaa3 --- /dev/null +++ b/tests/bats/01_base.bats @@ -0,0 +1,271 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +# to silence shellcheck +declare stderr + +#---------- + +@test "cscli - usage" { + run -0 cscli + assert_output --partial "Usage:" + assert_output --partial "cscli [command]" + assert_output --partial "Available Commands:" + + # no "usage" output after every error + run -1 --separate-stderr cscli blahblah + # error is displayed as log entry, not with print + assert_stderr --partial 'level=fatal msg="unknown command \"blahblah\" for \"cscli\""' + refute_stderr --partial 'unknown command "blahblah" for "cscli"' +} + +@test "cscli version" { + run -0 cscli version + assert_output --partial "version:" + assert_output --partial "Codename:" + assert_output --partial "BuildDate:" + assert_output --partial "GoVersion:" + assert_output --partial "Platform:" + assert_output --partial "Constraint_parser:" + assert_output --partial "Constraint_scenario:" + assert_output --partial "Constraint_api:" + assert_output --partial "Constraint_acquis:" + + # should work without configuration file + rm "${CONFIG_YAML}" + run -0 cscli version + assert_output --partial "version:" +} + +@test "cscli help" { + run -0 cscli help + assert_line "Available Commands:" + 
assert_line --regexp ".* help .* Help about any command" + + # should work without configuration file + rm "${CONFIG_YAML}" + run -0 cscli help + assert_line "Available Commands:" +} + +@test "cscli config show -o human" { + run -0 cscli config show -o human + assert_output --partial "Global:" + assert_output --partial "Crowdsec:" + assert_output --partial "cscli:" + assert_output --partial "Local API Server:" +} + +@test "cscli config show -o json" { + run -0 cscli config show -o json + assert_output --partial '"API":' + assert_output --partial '"Common":' + assert_output --partial '"ConfigPaths":' + assert_output --partial '"Crowdsec":' + assert_output --partial '"Cscli":' + assert_output --partial '"DbConfig":' + assert_output --partial '"Hub":' + assert_output --partial '"PluginConfig":' + assert_output --partial '"Prometheus":' +} + +@test "cscli config show -o raw" { + run -0 cscli config show -o raw + assert_line "api:" + assert_line "common:" + assert_line "config_paths:" + assert_line "crowdsec_service:" + assert_line "cscli:" + assert_line "db_config:" + assert_line "plugin_config:" + assert_line "prometheus:" +} + +@test "cscli config show --key" { + run -0 cscli config show --key Config.API.Server.ListenURI + assert_output "127.0.0.1:8080" +} + +@test "cscli config backup / restore" { + # test that we need a valid path + # disabled because in CI, the empty string is not passed as a parameter + ## run -1 --separate-stderr cscli config backup "" + ## assert_stderr --partial "Failed to backup configurations: directory path can't be empty" + + run -1 --separate-stderr cscli config backup "/dev/null/blah" + assert_stderr --partial "Failed to backup configurations: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" + + # pick a dirpath + backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + + # succeed the first time + run -0 cscli config backup "${backupdir}" + assert_output --partial "Starting configuration backup" + + # don't overwrite an existing backup + run -1 --separate-stderr cscli config backup "${backupdir}" + assert_stderr --partial "Failed to backup configurations" + assert_stderr --partial "file exists" + + SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" + + # restore + rm "${SIMULATION_YAML}" + run -0 cscli config restore "${backupdir}" + assert_file_exist "${SIMULATION_YAML}" + + # cleanup + rm -rf -- "${backupdir:?}" + + # backup: detect missing files + rm "${SIMULATION_YAML}" + run -1 --separate-stderr cscli config backup "${backupdir}" + assert_stderr --regexp "Failed to backup configurations: failed copy .* to .*: stat .*: no such file or directory" + rm -rf -- "${backupdir:?}" +} + +@test "cscli lapi status" { + run -0 --separate-stderr cscli lapi status + + run -0 echo "${stderr}" + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial " on http://127.0.0.1:8080/" + assert_output --partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - missing LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + rm -f "${LOCAL_API_CREDENTIALS}" + run -1 --separate-stderr cscli lapi status + assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" + + run -1 --separate-stderr cscli alerts list + assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" 
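+ # any other subcommand that needs the LAPI client should fail the same way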
+
+ run -1 --separate-stderr cscli decisions list
+ assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory"
+}
+
+@test "cscli - empty LAPI credentials file" {
+ LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path')
+ truncate -s 0 "${LOCAL_API_CREDENTIALS}"
+ run -1 --separate-stderr cscli lapi status
+ assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'"
+
+ run -1 --separate-stderr cscli alerts list
+ assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'"
+
+ run -1 --separate-stderr cscli decisions list
+ assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'"
+}
+
+@test "cscli - missing LAPI client settings" {
+ config_set 'del(.api.client)'
+ run -1 --separate-stderr cscli lapi status
+ assert_stderr --partial "loading api client: no API client section in configuration"
+
+ run -1 --separate-stderr cscli alerts list
+ assert_stderr --partial "loading api client: no API client section in configuration"
+
+ run -1 --separate-stderr cscli decisions list
+ assert_stderr --partial "loading api client: no API client section in configuration"
+}
+
+@test "cscli - malformed LAPI url" {
+ LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path')
+ config_set "${LOCAL_API_CREDENTIALS}" '.url="https://127.0.0.1:-80"'
+
+ run -1 --separate-stderr cscli lapi status
+ assert_stderr --partial 'parsing api url'
+ assert_stderr --partial 'invalid port \":-80\" after host'
+
+ run -1 --separate-stderr cscli alerts list
+ assert_stderr --partial 'parsing api url'
+ assert_stderr --partial 'invalid port \":-80\" after host'
+
+ run -1 --separate-stderr cscli decisions list
+ assert_stderr --partial 'parsing api url'
+ assert_stderr --partial 'invalid port \":-80\" after host'
+}
+
+@test "cscli metrics" {
+ run -0 cscli lapi status
+ run -0 --separate-stderr cscli metrics
+ assert_output --partial "Route"
+ assert_output --partial '/v1/watchers/login'
+ assert_output --partial "Local Api Metrics:"
+}
+
+@test "'cscli completion' with or without configuration file" {
+ run -0 cscli completion bash
+ assert_output --partial "# bash completion for cscli"
+ run -0 cscli completion zsh
+ assert_output --partial "# zsh completion for cscli"
+ run -0 cscli completion powershell
+ assert_output --partial "# powershell completion for cscli"
+ run -0 cscli completion fish
+ assert_output --partial "# fish completion for cscli"
+
+ rm "${CONFIG_YAML}"
+ run -0 cscli completion bash
+ assert_output --partial "# bash completion for cscli"
+}
+
+@test "cscli hub list" {
+ # we check for the presence of some objects. There may be others when we
+ # use $PACKAGE_TESTING, so the order is not important.
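+ # the same inventory is then checked in each output format below
+ # (human, raw/csv, json), presumably to catch format-specific regressions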
+ + run -0 cscli hub list -o human + assert_line --regexp '^ crowdsecurity/linux' + assert_line --regexp '^ crowdsecurity/sshd' + assert_line --regexp '^ crowdsecurity/dateparse-enrich' + assert_line --regexp '^ crowdsecurity/geoip-enrich' + assert_line --regexp '^ crowdsecurity/sshd-logs' + assert_line --regexp '^ crowdsecurity/syslog-logs' + assert_line --regexp '^ crowdsecurity/ssh-bf' + assert_line --regexp '^ crowdsecurity/ssh-slow-bf' + + run -0 cscli hub list -o raw + assert_line --regexp '^crowdsecurity/linux,enabled,[0-9]+\.[0-9]+,core linux support : syslog\+geoip\+ssh,collections$' + assert_line --regexp '^crowdsecurity/sshd,enabled,[0-9]+\.[0-9]+,sshd support : parser and brute-force detection,collections$' + assert_line --regexp '^crowdsecurity/dateparse-enrich,enabled,[0-9]+\.[0-9]+,,parsers$' + assert_line --regexp '^crowdsecurity/geoip-enrich,enabled,[0-9]+\.[0-9]+,"Populate event with geoloc info : as, country, coords, source range.",parsers$' + assert_line --regexp '^crowdsecurity/sshd-logs,enabled,[0-9]+\.[0-9]+,Parse openSSH logs,parsers$' + assert_line --regexp '^crowdsecurity/syslog-logs,enabled,[0-9]+\.[0-9]+,,parsers$' + assert_line --regexp '^crowdsecurity/ssh-bf,enabled,[0-9]+\.[0-9]+,Detect ssh bruteforce,scenarios$' + assert_line --regexp '^crowdsecurity/ssh-slow-bf,enabled,[0-9]+\.[0-9]+,Detect slow ssh bruteforce,scenarios$' + + run -0 cscli hub list -o json + run jq -r '.collections[].name, .parsers[].name, .scenarios[].name' <(output) + assert_line 'crowdsecurity/linux' + assert_line 'crowdsecurity/sshd' + assert_line 'crowdsecurity/dateparse-enrich' + assert_line 'crowdsecurity/geoip-enrich' + assert_line 'crowdsecurity/sshd-logs' + assert_line 'crowdsecurity/syslog-logs' + assert_line 'crowdsecurity/ssh-bf' + assert_line 'crowdsecurity/ssh-slow-bf' +} diff --git a/tests/bats/01_crowdsec.bats b/tests/bats/01_crowdsec.bats new file mode 100644 index 0000000..a60b576 --- /dev/null +++ b/tests/bats/01_crowdsec.bats @@ -0,0 +1,181 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "crowdsec (usage)" { + run -0 --separate-stderr timeout 2s "${CROWDSEC}" -h + assert_stderr_line --regexp "Usage of .*:" + + run -0 --separate-stderr timeout 2s "${CROWDSEC}" --help + assert_stderr_line --regexp "Usage of .*:" +} + +@test "crowdsec (unknown flag)" { + run -2 --separate-stderr timeout 2s "${CROWDSEC}" --foobar + assert_stderr_line "flag provided but not defined: -foobar" + assert_stderr_line --regexp "Usage of .*" +} + +@test "crowdsec (unknown argument)" { + run -2 --separate-stderr timeout 2s "${CROWDSEC}" trololo + assert_stderr_line "argument provided but not defined: trololo" + assert_stderr_line --regexp "Usage of .*" +} + +@test "crowdsec (no api and no agent)" { + run -1 --separate-stderr timeout 2s "${CROWDSEC}" -no-api -no-cs + assert_stderr_line --partial "You must run at least the API Server or crowdsec" +} + +@test "crowdsec - print error on exit" { + # errors that cause program termination are printed to stderr, not only logs + config_set '.db_config.type="meh"' + run -1 --separate-stderr "${CROWDSEC}" + refute_output + assert_stderr --partial "unable to create database client: unknown database type 'meh'" +} + +@test "CS_LAPI_SECRET not strong enough" 
{ + CS_LAPI_SECRET=foo run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" +} + +@test "crowdsec - reload (change of logfile, disabled agent)" { + logdir1=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + log_old="${logdir1}/crowdsec.log" + config_set ".common.log_dir=\"${logdir1}\"" + + run -0 ./instance-crowdsec start + # PID="$output" + assert_file_exist "$log_old" + assert_file_contains "$log_old" "Starting processing data" + + logdir2=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + log_new="${logdir2}/crowdsec.log" + config_set ".common.log_dir=\"${logdir2}\"" + + config_disable_agent + + sleep 5 + + # this won't work as crowdsec-wrapper does not relay the signal + # run -0 kill -HUP "$PID" + + # During functional tests, crowdsec is often run from a wrapper script, + # which captures its output (for coverage reports) and cannot relay signals + # at the same time. So instead of sending a SIGHUP to the wrapper, we send + # it to the crowdsec process by name - with or without coverage. + run pkill -HUP -f "$BIN_DIR/crowdsec.cover" + run pkill -HUP -f "$BIN_DIR/crowdsec" + + for ((i=0; i<10; i++)); do + sleep 1 + grep -q "serve: shutting down api server" <"$log_old" && break + done + + echo "waited $i seconds" + + echo + echo "OLD LOG" + echo + ls -la "$log_old" || true + cat "$log_old" || true + + assert_file_contains "$log_old" "SIGHUP received, reloading" + assert_file_contains "$log_old" "Crowdsec engine shutting down" + assert_file_contains "$log_old" "Killing parser routines" + assert_file_contains "$log_old" "Bucket routine exiting" + assert_file_contains "$log_old" "serve: shutting down api server" + + sleep 5 + + assert_file_exist "$log_new" + + for ((i=0; i<10; i++)); do + sleep 1 + grep -q "Reload is finished" <"$log_old" && break + done + + echo "waited $i seconds" + + echo + echo "NEW LOG" + echo + ls -la "$log_new" || true + cat "$log_new" || true + + assert_file_contains "$log_new" "CrowdSec Local API listening on 127.0.0.1:8080" + assert_file_contains "$log_new" "Reload is finished" + + run -0 ./instance-crowdsec stop +} + +@test "crowdsec (error if the acquisition_path file is defined but missing)" { + ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') + rm -f "$ACQUIS_YAML" + + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr_line --partial "acquis.yaml: no such file or directory" +} + +@test "crowdsec (error if acquisition_path is not defined and acquisition_dir is empty)" { + ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') + rm -f "$ACQUIS_YAML" + config_set '.crowdsec_service.acquisition_path=""' + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + rm -f "$ACQUIS_DIR" + + config_set '.common.log_media="stdout"' + run -124 --separate-stderr timeout 2s "${CROWDSEC}" + # check warning + assert_stderr_line --partial "no acquisition file found" +} + +@test "crowdsec (error if acquisition_path and acquisition_dir are not defined)" { + ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') + rm -f "$ACQUIS_YAML" + config_set '.crowdsec_service.acquisition_path=""' + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + rm -f "$ACQUIS_DIR" + config_set '.crowdsec_service.acquisition_dir=""' + + config_set '.common.log_media="stdout"' + run -124 --separate-stderr timeout 2s "${CROWDSEC}" + # check warning + assert_stderr_line --partial "no acquisition_path or acquisition_dir specified" +} + 
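+# reminder: with timeout(1), exit status 124 means the command was still running
+# when the timer expired - here, that crowdsec started up and kept running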
+@test "crowdsec (no error if acquisition_path is empty string but acquisition_dir is not empty)" { + ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') + rm -f "$ACQUIS_YAML" + config_set '.crowdsec_service.acquisition_path=""' + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" + touch "$ACQUIS_DIR"/foo.yaml + + run -124 --separate-stderr timeout 2s "${CROWDSEC}" +} diff --git a/tests/bats/02_nolapi.bats b/tests/bats/02_nolapi.bats new file mode 100644 index 0000000..a61275c --- /dev/null +++ b/tests/bats/02_nolapi.bats @@ -0,0 +1,90 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + # always reset config and data, but run the daemon only if one test requires it + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "test without -no-api flag" { + run -124 --separate-stderr timeout 2s "${CROWDSEC}" + # from `man timeout`: If the command times out, and --preserve-status is not set, then exit with status 124. +} + +@test "crowdsec should not run without LAPI (-no-api flag)" { + # really needs 4 secs on slow boxes + run -1 --separate-stderr timeout 4s "${CROWDSEC}" -no-api +} + +@test "crowdsec should not run without LAPI (no api.server in configuration file)" { + config_disable_lapi + # really needs 4 secs on slow boxes + run -1 --separate-stderr timeout 4s "${CROWDSEC}" + + assert_stderr --partial "crowdsec local API is disabled" +} + +@test "capi status shouldn't be ok without api.server" { + config_disable_lapi + run -1 --separate-stderr cscli capi status + + assert_stderr --partial "crowdsec local API is disabled" + assert_stderr --partial "There is no configuration on 'api.server:'" +} + +@test "cscli config show -o human" { + config_disable_lapi + run -0 cscli config show -o human + assert_output --partial "Global:" + assert_output --partial "Crowdsec:" + assert_output --partial "cscli:" + refute_output --partial "Local API Server:" +} + +@test "cscli config backup" { + config_disable_lapi + backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + run -0 cscli config backup "${backupdir}" + assert_output --partial "Starting configuration backup" + run -1 --separate-stderr cscli config backup "${backupdir}" + rm -rf -- "${backupdir:?}" + + assert_stderr --partial "Failed to backup configurations" + assert_stderr --partial "file exists" +} + +@test "lapi status shouldn't be ok without api.server" { + config_disable_lapi + ./instance-crowdsec start || true + run -1 --separate-stderr cscli machines list + assert_stderr --partial "Local API is disabled, please run this command on the local API machine" +} + +@test "cscli metrics" { + skip 'need to trigger metrics with a live parse' + config_disable_lapi + ./instance-crowdsec start + run -0 --separate-stderr cscli metrics + assert_output --partial "ROUTE" + assert_output --partial "/v1/watchers/login" + + assert_stderr --partial "crowdsec local API is disabled" + assert_stderr --partial "Local API is disabled, please run this command on the local API machine" +} diff --git a/tests/bats/03_noagent.bats b/tests/bats/03_noagent.bats new file mode 100644 index 0000000..d9150af --- /dev/null +++ b/tests/bats/03_noagent.bats @@ -0,0 +1,75 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + 
+teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "with agent: test without -no-cs flag" { + run -124 timeout 2s "${CROWDSEC}" + # from `man timeout`: If the command times out, and --preserve-status is not set, then exit with status 124. +} + +@test "no agent: crowdsec LAPI should run (-no-cs flag)" { + run -124 timeout 2s "${CROWDSEC}" -no-cs +} + +@test "no agent: crowdsec LAPI should run (no crowdsec_service in configuration file)" { + config_disable_agent + run -124 --separate-stderr timeout 2s "${CROWDSEC}" + + assert_stderr --partial "crowdsec agent is disabled" +} + +@test "no agent: cscli config show" { + config_disable_agent + run -0 --separate-stderr cscli config show -o human + assert_output --partial "Global:" + assert_output --partial "cscli:" + assert_output --partial "Local API Server:" + + refute_output --partial "Crowdsec:" +} + +@test "no agent: cscli config backup" { + config_disable_agent + backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + run -0 cscli config backup "${backupdir}" + assert_output --partial "Starting configuration backup" + run -1 --separate-stderr cscli config backup "${backupdir}" + + assert_stderr --partial "Failed to backup configurations" + assert_stderr --partial "file exists" + rm -rf -- "${backupdir:?}" +} + +@test "no agent: lapi status should be ok" { + config_disable_agent + ./instance-crowdsec start + run -0 --separate-stderr cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli metrics" { + config_disable_agent + ./instance-crowdsec start + run -0 cscli lapi status + run -0 cscli metrics +} diff --git a/tests/bats/04_capi.bats b/tests/bats/04_capi.bats new file mode 100644 index 0000000..6564d1f --- /dev/null +++ b/tests/bats/04_capi.bats @@ -0,0 +1,67 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" +} + +#---------- + +@test "cscli capi status" { + config_enable_capi + run -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX + run -0 cscli capi status + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial " on https://api.crowdsec.net/" + assert_output --partial "You can successfully interact with Central API (CAPI)" +} + +@test "cscli alerts list: receive a community pull when capi is enabled" { + sleep 2 + ./instance-crowdsec start + for ((i=0; i<15; i++)); do + sleep 2 + [[ $(cscli alerts list -a -o json 2>/dev/null || cscli alerts list -o json) != "null" ]] && break + done + + run cscli alerts list -a -o json + if [[ "${status}" -ne 0 ]]; then + run --separate-stderr cscli alerts list -o json + fi + run -0 jq -r '. | length' <(output) + refute_output 0 +} + +@test "we have exactly one machine, localhost" { + run -0 --separate-stderr cscli machines list -o json + run -0 jq -c '[. 
| length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress]' <(output) + assert_output '[1,"githubciXXXXXXXXXXXXXXXXXXXXXXXX",true,"127.0.0.1"]' +} + +@test "no agent: capi status should be ok" { + ./instance-crowdsec stop + config_disable_agent + ./instance-crowdsec start + run -0 --separate-stderr cscli capi status + assert_stderr --partial "You can successfully interact with Central API (CAPI)" +} + +@test "cscli capi status: fails without credentials" { + ONLINE_API_CREDENTIALS_YAML="$(config_get '.api.server.online_client.credentials_path')" + rm "${ONLINE_API_CREDENTIALS_YAML}" + run -1 --separate-stderr cscli capi status + assert_stderr --partial "Local API is disabled, please run this command on the local API machine: loading online client credentials: failed to read api server credentials configuration file '${ONLINE_API_CREDENTIALS_YAML}': open ${ONLINE_API_CREDENTIALS_YAML}: no such file or directory" +} diff --git a/tests/bats/04_nocapi.bats b/tests/bats/04_nocapi.bats new file mode 100644 index 0000000..d77c64b --- /dev/null +++ b/tests/bats/04_nocapi.bats @@ -0,0 +1,81 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "without capi: crowdsec LAPI should run without capi (-no-capi flag)" { + config_set '.common.log_media="stdout"' + + run -124 --separate-stderr timeout 1s "${CROWDSEC}" -no-capi + assert_stderr --partial "Communication with CrowdSec Central API disabled from args" +} + +@test "without capi: crowdsec LAPI should still work" { + config_disable_capi + run -124 --separate-stderr timeout 1s "${CROWDSEC}" + # from `man timeout`: If the command times out, and --preserve-status is not set, then exit with status 124. 
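+ # i.e. the daemon survived the full second, so it does run without CAPI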
+ assert_stderr --partial "push and pull to Central API disabled"
+}
+
+@test "without capi: cscli capi status -> fail" {
+ config_disable_capi
+ ./instance-crowdsec start
+ run -1 --separate-stderr cscli capi status
+ assert_stderr --partial "no configuration for Central API in "
+}
+
+@test "no capi: cscli config show" {
+ config_disable_capi
+ run -0 --separate-stderr cscli config show -o human
+ assert_output --partial "Global:"
+ assert_output --partial "cscli:"
+ assert_output --partial "Crowdsec:"
+ assert_output --partial "Local API Server:"
+}
+
+@test "no capi: cscli config backup" {
+ config_disable_capi
+ backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u)
+ run -0 cscli config backup "${backupdir}"
+ assert_output --partial "Starting configuration backup"
+ run -1 --separate-stderr cscli config backup "${backupdir}"
+ assert_stderr --partial "Failed to backup configurations"
+ assert_stderr --partial "file exists"
+ rm -rf -- "${backupdir:?}"
+}
+
+@test "without capi: cscli lapi status -> success" {
+ config_disable_capi
+ ./instance-crowdsec start
+ run -0 --separate-stderr cscli lapi status
+ assert_stderr --partial "You can successfully interact with Local API (LAPI)"
+}
+
+@test "cscli metrics" {
+ config_disable_capi
+ ./instance-crowdsec start
+ run -0 cscli lapi status
+ run -0 --separate-stderr cscli metrics
+ assert_output --partial "Route"
+ assert_output --partial '/v1/watchers/login'
+ assert_output --partial "Local Api Metrics:"
+}
diff --git a/tests/bats/05_config_yaml_local.bats b/tests/bats/05_config_yaml_local.bats
new file mode 100644
index 0000000..d974ff7
--- /dev/null
+++ b/tests/bats/05_config_yaml_local.bats
@@ -0,0 +1,143 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+fake_log() {
+ for _ in $(seq 1 6); do
+ echo "$(LC_ALL=C date '+%b %d %H:%M:%S ')"'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424'
+ done
+}
+
+setup_file() {
+ load "../lib/setup_file.sh"
+}
+
+teardown_file() {
+ load "../lib/teardown_file.sh"
+}
+
+setup() {
+ load "../lib/setup.sh"
+ ./instance-data load
+ run -0 config_get '.api.client.credentials_path'
+ LOCAL_API_CREDENTIALS="${output}"
+ export LOCAL_API_CREDENTIALS
+}
+
+teardown() {
+ ./instance-crowdsec stop
+}
+
+#----------
+
+@test "config.yaml.local - cscli (log_level)" {
+ config_set '.common.log_level="warning"'
+ run -0 --separate-stderr cscli config show --key Config.Common.LogLevel
+ assert_output "warning"
+
+ echo "{'common':{'log_level':'debug'}}" >"${CONFIG_YAML}.local"
+ run -0 --separate-stderr cscli config show --key Config.Common.LogLevel
+ assert_output "debug"
+}
+
+@test "config.yaml.local - cscli (log_level - with envvar)" {
+ config_set '.common.log_level="warning"'
+ run -0 --separate-stderr cscli config show --key Config.Common.LogLevel
+ assert_output "warning"
+
+ export CROWDSEC_LOG_LEVEL=debug
+ echo "{'common':{'log_level':'${CROWDSEC_LOG_LEVEL}'}}" >"${CONFIG_YAML}.local"
+ run -0 --separate-stderr cscli config show --key Config.Common.LogLevel
+ assert_output "debug"
+}
+
+@test "config.yaml.local - crowdsec (listen_url)" {
+ # disable the agent or we'll need to patch api client credentials too
+ run -0 config_disable_agent
+ ./instance-crowdsec start
+ run -0 ./bin/wait-for-port -q 8080
+ ./instance-crowdsec stop
+ run -1 ./bin/wait-for-port -q 8080
+
+ echo "{'api':{'server':{'listen_uri':'127.0.0.1:8083'}}}" >"${CONFIG_YAML}.local"
+
+ ./instance-crowdsec start
+ run -0 ./bin/wait-for-port -q 8083
+ run -1 ./bin/wait-for-port -q 8080
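+ # the *.local override took precedence: the API moved from 8080 to 8083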
+ ./instance-crowdsec stop
+
+ rm -f "${CONFIG_YAML}.local"
+ ./instance-crowdsec start
+ run -1 ./bin/wait-for-port -q 8083
+ run -0 ./bin/wait-for-port -q 8080
+}
+
+@test "local_api_credentials.yaml.local" {
+ run -0 config_disable_agent
+ echo "{'api':{'server':{'listen_uri':'127.0.0.1:8083'}}}" >"${CONFIG_YAML}.local"
+ ./instance-crowdsec start
+ run -0 ./bin/wait-for-port -q 8083
+
+ run -1 cscli decisions list
+ echo "{'url':'http://127.0.0.1:8083'}" >"${LOCAL_API_CREDENTIALS}.local"
+
+ run -0 cscli decisions list
+}
+
+@test "simulation.yaml.local" {
+ run -0 config_get '.config_paths.simulation_path'
+ refute_output null
+ SIMULATION="${output}"
+
+ echo "simulation: off" >"${SIMULATION}"
+ run -0 cscli simulation status -o human
+ assert_output --partial "global simulation: disabled"
+
+ echo "simulation: on" >"${SIMULATION}"
+ run -0 cscli simulation status -o human
+ assert_output --partial "global simulation: enabled"
+
+ echo "simulation: off" >"${SIMULATION}.local"
+ run -0 cscli simulation status -o human
+ assert_output --partial "global simulation: disabled"
+
+ rm -f "${SIMULATION}.local"
+ run -0 cscli simulation status -o human
+ assert_output --partial "global simulation: enabled"
+}
+
+@test "profiles.yaml.local" {
+ run -0 --separate-stderr config_get '.api.server.profiles_path'
+ refute_output null
+ PROFILES="${output}"
+
+ cat <<-EOT >"${PROFILES}.local"
+ name: default_ip_remediation
+ filters:
+ - Alert.Remediation == true && Alert.GetScope() == "Ip"
+ decisions:
+ - type: captcha
+ duration: 2h
+ on_success: break
+ EOT
+
+ tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp)
+ touch "${tmpfile}"
+ ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path')
+ echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}"
+
+ ./instance-crowdsec start
+ sleep .5
+ fake_log >>"${tmpfile}"
+
+ # this could be simplified, but some systems are slow and we don't want to
+ # wait more than required
+ for ((i=0;i<30;i++)); do
+ sleep .5
+ run -0 --separate-stderr cscli decisions list -o json
+ run -0 jq --exit-status '.[].decisions[0] | [.value,.type] == ["1.1.1.172","captcha"]' <(output) && break
+ done
+ rm -f -- "${tmpfile}"
+ [[ "${status}" -eq 0 ]] || fail "captcha not triggered"
+}
diff --git a/tests/bats/10_bouncers.bats b/tests/bats/10_bouncers.bats
new file mode 100644
index 0000000..832fea0
--- /dev/null
+++ b/tests/bats/10_bouncers.bats
@@ -0,0 +1,58 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+setup_file() {
+ load "../lib/setup_file.sh"
+}
+
+teardown_file() {
+ load "../lib/teardown_file.sh"
+}
+
+setup() {
+ load "../lib/setup.sh"
+ ./instance-data load
+ ./instance-crowdsec start
+}
+
+teardown() {
+ ./instance-crowdsec stop
+}
+
+#----------
+
+@test "there are 0 bouncers" {
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output "[]"
+}
+
+@test "we can add one bouncer, and delete it" {
+ run -0 cscli bouncers add ciTestBouncer
+ assert_output --partial "Api key for 'ciTestBouncer':"
+ run -0 cscli bouncers delete ciTestBouncer
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output '[]'
+}
+
+@test "we can't add the same bouncer twice" {
+ run -0 cscli bouncers add ciTestBouncer
+ run -1 --separate-stderr cscli bouncers add ciTestBouncer -o json
+
+ run -0 jq -r '.level' <(stderr)
+ assert_output 'fatal'
+ run -0 jq -r '.msg' <(stderr)
+ assert_output "unable to create bouncer: bouncer ciTestBouncer already exists"
+
+ run -0 cscli bouncers list -o json
+ run -0 jq '. | length' <(output)
+ assert_output 1
+}
+
+@test "delete the bouncer multiple times, even if it does not exist" {
+ run -0 cscli bouncers add ciTestBouncer
+ run -0 cscli bouncers delete ciTestBouncer
+ run -1 cscli bouncers delete ciTestBouncer
+ run -1 cscli bouncers delete foobarbaz
+}
diff --git a/tests/bats/11_bouncers_tls.bats b/tests/bats/11_bouncers_tls.bats
new file mode 100644
index 0000000..dcb1ff4
--- /dev/null
+++ b/tests/bats/11_bouncers_tls.bats
@@ -0,0 +1,97 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+setup_file() {
+ load "../lib/setup_file.sh"
+ ./instance-data load
+
+ tmpdir="${BATS_FILE_TMPDIR}"
+ export tmpdir
+
+ CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl"
+ export CFDIR
+
+ #gen the CA
+ cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca"
+ #gen an intermediate
+ cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter"
+ cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter"
+ #gen server cert for crowdsec with the intermediate
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server"
+ #gen client cert for the bouncer
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer"
+ #gen client cert for the bouncer with an invalid OU
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_bad_ou"
+ #gen client cert for the bouncer signed directly by the root CA; it should be refused by crowdsec, which only trusts the intermediate
+ cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_invalid"
+
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_revoked"
+ serial="$(openssl x509 -noout -serial -in "${tmpdir}/bouncer_revoked.pem" | cut -d '=' -f2)"
+ echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt"
+ cfssl gencrl "${tmpdir}/serials.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem"
+
+ cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem"
+
+ config_set '
+ .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" |
+ .api.server.tls.key_file=strenv(tmpdir) + "/server-key.pem" |
+ .api.server.tls.ca_cert_path=strenv(tmpdir) + "/inter.pem" |
+ .api.server.tls.crl_path=strenv(tmpdir) + "/crl.pem" |
+ .api.server.tls.bouncers_allowed_ou=["bouncer-ou"]
+ '
+
+ config_disable_agent
+}
+
+teardown_file() {
+ load "../lib/teardown_file.sh"
+}
+
+setup() {
+ load "../lib/setup.sh"
+ ./instance-crowdsec start
+}
+
+teardown() {
+ ./instance-crowdsec stop
+}
+
+#----------
+
+@test "there are 0 bouncers" {
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output "[]"
+}
+
+@test "simulate one bouncer request with a valid cert" {
+ run -0 curl -s --cert "${tmpdir}/bouncer.pem" --key "${tmpdir}/bouncer-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42
+ assert_output "null"
+ run -0 --separate-stderr cscli bouncers list -o json
+ run -0 jq '. | length' <(output)
+ assert_output '1'
+ run -0 --separate-stderr cscli bouncers list -o json
+ run -0 jq -r '.[] | .name' <(output)
+ assert_output "localhost@127.0.0.1"
+ run cscli bouncers delete localhost@127.0.0.1
+}
+
+@test "simulate one bouncer request with an invalid cert" {
+ run curl -s --cert "${tmpdir}/bouncer_invalid.pem" --key "${tmpdir}/bouncer_invalid-key.pem" --cacert "${tmpdir}/ca-key.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output "[]"
+}
+
+@test "simulate one bouncer request with an invalid OU" {
+ run curl -s --cert "${tmpdir}/bouncer_bad_ou.pem" --key "${tmpdir}/bouncer_bad_ou-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output "[]"
+}
+
+@test "simulate one bouncer request with a revoked certificate" {
+ run -0 curl -i -s --cert "${tmpdir}/bouncer_revoked.pem" --key "${tmpdir}/bouncer_revoked-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42
+ assert_output --partial "access forbidden"
+ run -0 --separate-stderr cscli bouncers list -o json
+ assert_output "[]"
+}
diff --git a/tests/bats/20_collections.bats b/tests/bats/20_collections.bats
new file mode 100644
index 0000000..c63d4c3
--- /dev/null
+++ b/tests/bats/20_collections.bats
@@ -0,0 +1,114 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+setup_file() {
+ load "../lib/setup_file.sh"
+}
+
+teardown_file() {
+ load "../lib/teardown_file.sh"
+}
+
+setup() {
+ load "../lib/setup.sh"
+ ./instance-data load
+ ./instance-crowdsec start
+}
+
+teardown() {
+ ./instance-crowdsec stop
+}
+
+#----------
+
+@test "we can list collections" {
+ run -0 cscli collections list
+}
+
+@test "there are 2 collections (linux and sshd)" {
+ run -0 cscli collections list -o json
+ run -0 jq '.collections | length' <(output)
+ assert_output 2
+}
+
+@test "can install a collection (as a regular user) and remove it" {
+ # collection is not installed
+ run -0 cscli collections list -o json
+ run -0 jq -r '.collections[].name' <(output)
+ refute_line "crowdsecurity/mysql"
+
+ # we install it
+ run -0 cscli collections install crowdsecurity/mysql -o human
+ assert_output --partial "Enabled crowdsecurity/mysql"
+
+ # it has been installed
+ run -0 cscli collections list -o json
+ run -0 jq -r '.collections[].name' <(output)
+ assert_line "crowdsecurity/mysql"
+
+ # we remove it
+ run -0 cscli collections remove crowdsecurity/mysql -o human
+ assert_output --partial "Removed symlink [crowdsecurity/mysql]"
+
+ # it has been removed
+ run -0 cscli collections list -o json
+ run -0 jq -r '.collections[].name' <(output)
+ refute_line "crowdsecurity/mysql"
+}
+
+@test "must use --force to remove a collection that belongs to another, which becomes tainted" {
+ # we expect no error since we may have multiple collections, some removed and some not
+ run -0 --separate-stderr cscli collections remove crowdsecurity/sshd
+ assert_stderr --partial "crowdsecurity/sshd belongs to other collections"
+ assert_stderr --partial "[crowdsecurity/linux]"
+
+ run -0 --separate-stderr cscli collections remove crowdsecurity/sshd --force
+ assert_stderr
--partial "Removed symlink [crowdsecurity/sshd]" + run -0 cscli collections inspect crowdsecurity/linux -o json + run -0 jq -r '.tainted' <(output) + assert_output "true" +} + +@test "can remove a collection" { + run -0 cscli collections remove crowdsecurity/linux + assert_output --partial "Removed" + assert_output --regexp ".*for the new configuration to be effective." + run -0 cscli collections inspect crowdsecurity/linux -o human + assert_line 'installed: false' +} + +@test "collections delete is an alias for collections remove" { + run -0 cscli collections delete crowdsecurity/linux + assert_output --partial "Removed" + assert_output --regexp ".*for the new configuration to be effective." +} + +@test "removing a collection that does not exist is noop" { + run -0 cscli collections remove crowdsecurity/apache2 + refute_output --partial "Removed" + assert_output --regexp ".*for the new configuration to be effective." +} + +@test "can remove a removed collection" { + run -0 cscli collections install crowdsecurity/mysql + run -0 cscli collections remove crowdsecurity/mysql + assert_output --partial "Removed" + run -0 cscli collections remove crowdsecurity/mysql + refute_output --partial "Removed" +} + +@test "can remove all collections" { + # we may have this too, from package installs + run cscli parsers delete crowdsecurity/whitelists + run -0 cscli collections remove --all + assert_output --partial "Removed symlink [crowdsecurity/sshd]" + assert_output --partial "Removed symlink [crowdsecurity/linux]" + run -0 cscli hub list -o json + assert_json '{collections:[],parsers:[],postoverflows:[],scenarios:[]}' + run -0 cscli collections remove --all + assert_output --partial 'Disabled 0 items' +} + +# TODO test download-only diff --git a/tests/bats/30_machines.bats b/tests/bats/30_machines.bats new file mode 100644 index 0000000..8f2c33d --- /dev/null +++ b/tests/bats/30_machines.bats @@ -0,0 +1,83 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "can list machines as regular user" { + run -0 cscli machines list +} + +@test "we have exactly one machine" { + run -0 --separate-stderr cscli machines list -o json + run -0 jq -c '[. | length, .[0].machineId[0:32], .[0].isValidated]' <(output) + assert_output '[1,"githubciXXXXXXXXXXXXXXXXXXXXXXXX",true]' +} + +@test "add a new machine and delete it" { + run -0 cscli machines add -a -f /dev/null CiTestMachine -o human + assert_output --partial "Machine 'CiTestMachine' successfully added to the local API" + assert_output --partial "API credentials dumped to '/dev/null'" + + # we now have two machines + run -0 --separate-stderr cscli machines list -o json + run -0 jq -c '[. | length, .[-1].machineId, .[0].isValidated]' <(output) + assert_output '[2,"CiTestMachine",true]' + + # delete the test machine + run -0 cscli machines delete CiTestMachine -o human + assert_output --partial "machine 'CiTestMachine' deleted successfully" + + # we now have one machine again + run -0 --separate-stderr cscli machines list -o json + run -0 jq '. 
| length' <(output)
+ assert_output 1
+}
+
+@test "register, validate and then remove a machine" {
+ run -0 cscli lapi register --machine CiTestMachineRegister -f /dev/null -o human
+ assert_output --partial "Successfully registered to Local API (LAPI)"
+ assert_output --partial "Local API credentials dumped to '/dev/null'"
+
+ # the machine is not validated yet
+ run -0 --separate-stderr cscli machines list -o json
+ run -0 jq '.[-1].isValidated' <(output)
+ assert_output 'null'
+
+ # validate the machine
+ run -0 cscli machines validate CiTestMachineRegister -o human
+ assert_output --partial "machine 'CiTestMachineRegister' validated successfully"
+
+ # the machine is now validated
+ run -0 --separate-stderr cscli machines list -o json
+ run -0 jq '.[-1].isValidated' <(output)
+ assert_output 'true'
+
+ # delete the test machine again
+ run -0 cscli machines delete CiTestMachineRegister -o human
+ assert_output --partial "machine 'CiTestMachineRegister' deleted successfully"
+
+ # we now have one machine, again
+ run -0 --separate-stderr cscli machines list -o json
+ run -0 jq '. | length' <(output)
+ assert_output 1
+}
diff --git a/tests/bats/30_machines_tls.bats b/tests/bats/30_machines_tls.bats
new file mode 100644
index 0000000..05976c2
--- /dev/null
+++ b/tests/bats/30_machines_tls.bats
@@ -0,0 +1,130 @@
+#!/usr/bin/env bats
+# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si:
+
+set -u
+
+setup_file() {
+ load "../lib/setup_file.sh"
+ ./instance-data load
+
+ CONFIG_DIR=$(dirname "${CONFIG_YAML}")
+ export CONFIG_DIR
+
+ tmpdir="${BATS_FILE_TMPDIR}"
+ export tmpdir
+
+ CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl"
+ export CFDIR
+
+ #gen the CA
+ cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca"
+ #gen an intermediate
+ cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter"
+ cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter"
+ #gen server cert for crowdsec with the intermediate
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server"
+ #gen client cert for the agent
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent"
+ #gen client cert for the agent with an invalid OU
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_bad_ou"
+ #gen client cert for the agent signed directly by the root CA; it should be refused by crowdsec, which only trusts the intermediate
+ cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_invalid"
+
+ cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_revoked"
+ serial="$(openssl x509 -noout -serial -in "${tmpdir}/agent_revoked.pem" | cut -d '=' -f2)"
+ echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt"
+ cfssl gencrl "${tmpdir}/serials.txt"
"${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem" + + cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" + + config_set ' + .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | + .api.server.tls.key_file=strenv(tmpdir) + "/server-key.pem" | + .api.server.tls.ca_cert_path=strenv(tmpdir) + "/inter.pem" | + .api.server.tls.crl_path=strenv(tmpdir) + "/crl.pem" | + .api.server.tls.agents_allowed_ou=["agent-ou"] + ' + + # remove all machines + + run -0 cscli machines list -o json + run -0 jq -r '.[].machineId' <(output) + for machine in $(output); do + run -0 cscli machines delete "${machine}" + done + + config_disable_agent +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "invalid OU for agent" { + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/agent_bad_ou-key.pem" | + .cert_path=strenv(tmpdir) + "/agent_bad_ou.pem" | + .url="https://127.0.0.1:8080" + ' + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + run -0 cscli machines list -o json + assert_output '[]' +} + +@test "we have exactly one machine registered with TLS" { + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/agent-key.pem" | + .cert_path=strenv(tmpdir) + "/agent.pem" | + .url="https://127.0.0.1:8080" + ' + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + run -0 cscli lapi status + run -0 cscli machines list -o json + run -0 jq -c '[. 
| length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) + + assert_output '[1,"localhost@127.0.0.1",true,"127.0.0.1","tls"]' + cscli machines delete localhost@127.0.0.1 +} + +@test "invalid cert for agent" { + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/agent_invalid-key.pem" | + .cert_path=strenv(tmpdir) + "/agent_invalid.pem" | + .url="https://127.0.0.1:8080" + ' + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + run -0 cscli machines list -o json + assert_output '[]' +} + +@test "revoked cert for agent" { + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/agent_revoked-key.pem" | + .cert_path=strenv(tmpdir) + "/agent_revoked.pem" | + .url="https://127.0.0.1:8080" + ' + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + run -0 cscli machines list -o json + assert_output '[]' +} diff --git a/tests/bats/40_cold-logs.bats b/tests/bats/40_cold-logs.bats new file mode 100644 index 0000000..c04cf6a --- /dev/null +++ b/tests/bats/40_cold-logs.bats @@ -0,0 +1,63 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +fake_log() { + for _ in $(seq 1 6); do + echo "$(LC_ALL=C date '+%b %d %H:%M:%S ')"'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424' + done +} + +setup_file() { + load "../lib/setup_file.sh" + + # we reset config and data, and only run the daemon once for all the tests in this file + ./instance-data load + ./instance-crowdsec start + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api 2>/dev/null +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" +} + +#---------- + +@test "we have one decision" { + run -0 cscli decisions list -o json + run -0 jq '. 
| length' <(output) + assert_output 1 +} + +@test "1.1.1.172 has been banned" { + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1.1.1.172' +} + +@test "1.1.1.172 has been banned (range/contained: -r 1.1.1.0/24 --contained)" { + run -0 cscli decisions list -r 1.1.1.0/24 --contained -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1.1.1.172' +} + +@test "1.1.1.172 has not been banned (range/NOT-contained: -r 1.1.2.0/24)" { + run -0 cscli decisions list -r 1.1.2.0/24 -o json + assert_output 'null' +} + +@test "1.1.1.172 has been banned (exact: -i 1.1.1.172)" { + run -0 cscli decisions list -i 1.1.1.172 -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1.1.1.172' +} + +@test "1.1.1.173 has not been banned (exact: -i 1.1.1.173)" { + run -0 cscli decisions list -i 1.1.1.173 -o json + assert_output 'null' +} diff --git a/tests/bats/40_live-ban.bats b/tests/bats/40_live-ban.bats new file mode 100644 index 0000000..8551b28 --- /dev/null +++ b/tests/bats/40_live-ban.bats @@ -0,0 +1,45 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +fake_log() { + for _ in $(seq 1 6); do + echo "$(LC_ALL=C date '+%b %d %H:%M:%S ')"'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424' + done +} + +setup_file() { + load "../lib/setup_file.sh" + # we reset config and data, but run the daemon only in the tests that need it + ./instance-data load +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "1.1.1.172 has been banned" { + tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) + touch "${tmpfile}" + ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') + echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" + + ./instance-crowdsec start + fake_log >>"${tmpfile}" + sleep 2 + rm -f -- "${tmpfile}" + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1.1.1.172' +} diff --git a/tests/bats/50_simulation.bats b/tests/bats/50_simulation.bats new file mode 100644 index 0000000..bff6ba6 --- /dev/null +++ b/tests/bats/50_simulation.bats @@ -0,0 +1,66 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +fake_log() { + for _ in $(seq 1 10); do + echo "$(LC_ALL=C date '+%b %d %H:%M:%S ')"'sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.174 port 35424' + done +} + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + cscli decisions delete --all +} + +#---------- + +@test "we have one decision" { + run -0 cscli simulation disable --global + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + run -0 cscli decisions list -o json + run -0 jq '. 
| length' <(output) + assert_output 1 +} + +@test "1.1.1.174 has been banned (exact)" { + run -0 cscli simulation disable --global + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1.1.1.174' +} + +@test "decision has simulated == false (exact)" { + run -0 cscli simulation disable --global + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + run -0 cscli decisions list -o json + run -0 jq '.[].decisions[0].simulated' <(output) + assert_output 'false' +} + +@test "simulated scenario, listing non-simulated: expect no decision" { + run -0 cscli simulation enable crowdsecurity/ssh-bf + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + run -0 cscli decisions list --no-simu -o json + assert_output 'null' +} + +@test "global simulation, listing non-simulated: expect no decision" { + run -0 cscli simulation disable crowdsecurity/ssh-bf + run -0 cscli simulation enable --global + fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + run -0 cscli decisions list --no-simu -o json + assert_output 'null' +} diff --git a/tests/bats/70_http_plugin.bats b/tests/bats/70_http_plugin.bats new file mode 100644 index 0000000..aeaa3fd --- /dev/null +++ b/tests/bats/70_http_plugin.bats @@ -0,0 +1,86 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + # eval "$(debug)" + ./instance-data load + + MOCK_OUT="${LOG_DIR}/mock-http.out" + export MOCK_OUT + MOCK_PORT="9999" + MOCK_URL="http://localhost:${MOCK_PORT}" + export MOCK_URL + PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') + # could have a trailing slash + PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + export PLUGIN_DIR + + # https://mikefarah.gitbook.io/yq/operators/env-variable-operators + config_set "$(config_get '.config_paths.notification_dir')/http.yaml" ' + .url=strenv(MOCK_URL) | + .group_wait="5s" | + .group_threshold=2 + ' + + config_set "$(config_get '.api.server.profiles_path')" ' + .notifications=["http_default"] | + .filters=["Alert.GetScope() == \"Ip\""] + ' + + config_set ' + .plugin_config.user="" | + .plugin_config.group="" + ' + + rm -f -- "${MOCK_OUT}" + + ./instance-crowdsec start + ./instance-mock-http start "${MOCK_PORT}" +} + +teardown_file() { + load "../lib/teardown_file.sh" + ./instance-crowdsec stop + ./instance-mock-http stop +} + +setup() { + load "../lib/setup.sh" +} + +#---------- + +@test "add two bans" { + run -0 cscli decisions add --ip 1.2.3.4 --duration 30s + assert_output --partial 'Decision successfully added' + + run -0 cscli decisions add --ip 1.2.3.5 --duration 30s + assert_output --partial 'Decision successfully added' + sleep 5 +} + +@test "expected 1 log line from http server" { + run -0 wc -l <"${MOCK_OUT}" + # wc can pad with spaces on some platforms + run -0 tr -d ' ' < <(output) + assert_output 1 +} + +@test "expected to receive 2 alerts in the request body from plugin" { + run -0 jq -r '.request_body' <"${MOCK_OUT}" + run -0 jq -r 'length' <(output) + assert_output 2 +} + +@test "expected to receive IP 1.2.3.4 as value of first decision" { + run -0 jq -r '.request_body[0].decisions[0].value' <"${MOCK_OUT}" + assert_output 1.2.3.4 +} + +@test "expected to receive IP 1.2.3.5 as value of second decision" { + run -0 jq -r '.request_body[1].decisions[0].value' <"${MOCK_OUT}" + assert_output 1.2.3.5 +} diff --git a/tests/bats/71_dummy_plugin.bats 
b/tests/bats/71_dummy_plugin.bats new file mode 100644 index 0000000..f5bdd0e --- /dev/null +++ b/tests/bats/71_dummy_plugin.bats @@ -0,0 +1,78 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + [[ -n "${PACKAGE_TESTING}" ]] && return + + ./instance-data load + + tempfile=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + export tempfile + + tempfile2=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + export tempfile2 + + DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" + + config_set "${DUMMY_YAML}" ' + .group_wait="5s" | + .group_threshold=2 | + .output_file=strenv(tempfile) | + .format="{{.|toJson}}" + ' + + cat <<-EOT >>"${DUMMY_YAML}" + --- + type: dummy + name: dummy_2 + log_level: info + format: secondfile + output_file: ${tempfile2} + EOT + + config_set "$(config_get '.api.server.profiles_path')" ' + .notifications=["dummy_default","dummy_2"] | + .filters=["Alert.GetScope() == \"Ip\""] + ' + + config_set ' + .plugin_config.user="" | + .plugin_config.group="" + ' + + ./instance-crowdsec start +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + [[ -n "${PACKAGE_TESTING}" ]] && skip + load "../lib/setup.sh" +} + +#---------- + +@test "add two bans" { + run -0 cscli decisions add --ip 1.2.3.4 --duration 30s + assert_output --partial 'Decision successfully added' + + run -0 cscli decisions add --ip 1.2.3.5 --duration 30s + assert_output --partial 'Decision successfully added' + sleep 2 +} + +@test "expected 1 notification" { + run -0 cat "${tempfile}" + assert_output --partial 1.2.3.4 + assert_output --partial 1.2.3.5 +} + +@test "second notification works too" { + run -0 cat "${tempfile2}" + assert_output --partial secondfile +} diff --git a/tests/bats/72_plugin_badconfig.bats b/tests/bats/72_plugin_badconfig.bats new file mode 100644 index 0000000..49da10c --- /dev/null +++ b/tests/bats/72_plugin_badconfig.bats @@ -0,0 +1,110 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + + PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') + # could have a trailing slash + PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + export PLUGIN_DIR + + PROFILES_PATH=$(config_get '.api.server.profiles_path') + export PROFILES_PATH +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop + rm -f "${PLUGIN_DIR}"/badname + chmod go-w "${PLUGIN_DIR}"/notification-http +} + +#---------- + +@test "misconfigured plugin, only user is empty" { + config_set '.plugin_config.user="" | .plugin_config.group="nogroup"' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: while getting process attributes: both plugin user and group must be set" +} + +@test "misconfigured plugin, only group is empty" { + config_set '(.plugin_config.user="nobody") | (.plugin_config.group="")' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: while getting process attributes: both plugin user and group must be set" +} + +@test "misconfigured plugin, user does not exist" { + config_set '(.plugin_config.user="userdoesnotexist") | 
(.plugin_config.group="groupdoesnotexist")' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: while getting process attributes: user: unknown user userdoesnotexist" +} + +@test "misconfigured plugin, group does not exist" { + config_set '(.plugin_config.user=strenv(USER)) | (.plugin_config.group="groupdoesnotexist")' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: while getting process attributes: group: unknown group groupdoesnotexist" +} + +@test "bad plugin name" { + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + cp "${PLUGIN_DIR}"/notification-http "${PLUGIN_DIR}"/badname + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: plugin name ${PLUGIN_DIR}/badname is invalid. Name should be like {type-name}" +} + +@test "bad plugin permission (group writable)" { + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + chmod g+w "${PLUGIN_DIR}"/notification-http + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is group writable, group writable plugins are invalid" +} + +@test "bad plugin permission (world writable)" { + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + chmod o+w "${PLUGIN_DIR}"/notification-http + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is world writable, world writable plugins are invalid" +} + +@test "config.yaml: missing .plugin_config section" { + config_set 'del(.plugin_config)' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: plugins are enabled, but the plugin_config section is missing in the configuration" +} + +@test "config.yaml: missing config_paths.notification_dir" { + config_set 'del(.config_paths.notification_dir)' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: plugins are enabled, but config_paths.notification_dir is not defined" +} + +@test "config.yaml: missing config_paths.plugin_dir" { + config_set 'del(.config_paths.plugin_dir)' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: plugins are enabled, but config_paths.plugin_dir is not defined" +} + +@test "unable to run local API: while reading plugin config" { + config_set '.config_paths.notification_dir="/this/path/does/not/exist"' + config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + run -1 --separate-stderr timeout 2s "${CROWDSEC}" + assert_stderr --partial "api server init: unable to run local API: while loading plugin config: open /this/path/does/not/exist: no such file or directory" +} diff --git a/tests/bats/80_alerts.bats b/tests/bats/80_alerts.bats new file mode 100644 index 0000000..520c658 --- /dev/null +++ 
b/tests/bats/80_alerts.bats @@ -0,0 +1,193 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli alerts list, with and without --machine" { + is_db_postgres && skip + run -0 cscli decisions add -i 10.20.30.40 -t ban + + run -0 cscli alerts list + refute_output --partial 'machine' + # machine name appears quoted in the "REASON" column + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + refute_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " + + run -0 cscli alerts list -m + assert_output --partial 'machine' + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " + + run -0 cscli alerts list --machine + assert_output --partial 'machine' + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " +} + +@test "cscli alerts list, human/json/raw" { + run -0 cscli decisions add -i 10.20.30.40 -t ban + + run -0 cscli alerts list -o human + run -0 plaintext < <(output) + assert_output --regexp ".* ID .* value .* reason .* country .* as .* decisions .* created_at .*" + assert_output --regexp ".*Ip:10.20.30.40.*manual 'ban' from.*ban:1.*" + + run -0 cscli alerts list -o json + run -0 jq -c '.[].decisions[0] | [.origin, .scenario, .scope, .simulated, .type, .value]' <(output) + assert_line --regexp "\[\"cscli\",\"manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?'\",\"Ip\",false,\"ban\",\"10.20.30.40\"\]" + + run -0 cscli alerts list -o raw + assert_line "id,scope,value,reason,country,as,decisions,created_at" + assert_line --regexp ".*,Ip,10.20.30.40,manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?',,,ban:1,.*" + + run -0 cscli alerts list -o raw --machine + assert_line "id,scope,value,reason,country,as,decisions,created_at,machine" + assert_line --regexp "^[0-9]+,Ip,10.20.30.40,manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?',,,ban:1,.*,githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?$" +} + +@test "cscli alerts inspect" { + run -0 cscli decisions add -i 10.20.30.40 -t ban + run -0 cscli alerts list -o raw <(output) + run -0 grep 10.20.30.40 <(output) + run -0 cut -d, -f1 <(output) + ALERT_ID="${output}" + + run -0 cscli alerts inspect "${ALERT_ID}" -o human + run -0 plaintext < <(output) + assert_line --regexp '^#+$' + assert_line --regexp "^ - ID *: ${ALERT_ID}$" + assert_line --regexp "^ - Date *: .*$" + assert_line --regexp "^ - Machine *: githubciXXXXXXXXXXXXXXXXXXXXXXXX.*" + assert_line --regexp "^ - Simulation *: false$" + assert_line --regexp "^ - Reason *: manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX.*'$" + assert_line --regexp "^ - Events Count *: 1$" + assert_line --regexp "^ - Scope:Value *: Ip:10.20.30.40$" + assert_line --regexp "^ - Country *: *$" + assert_line --regexp "^ - AS *: *$" + assert_line --regexp "^ - Begin *: .*$" + assert_line --regexp "^ - End *: .*$" + assert_line --regexp "^ - Active Decisions *:$" + assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" + assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" + + 
run -0 cscli alerts inspect "${ALERT_ID}" -o human --details + # XXX can we have something here? + + run -0 cscli alerts inspect "${ALERT_ID}" -o raw + assert_line --regexp "^ *capacity: 0$" + assert_line --regexp "^ *id: ${ALERT_ID}$" + assert_line --regexp "^ *origin: cscli$" + assert_line --regexp "^ *scenario: manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX.*'$" + assert_line --regexp "^ *scope: Ip$" + assert_line --regexp "^ *simulated: false$" + assert_line --regexp "^ *type: ban$" + assert_line --regexp "^ *value: 10.20.30.40$" + + run -0 cscli alerts inspect "${ALERT_ID}" -o json + alert=${output} + run jq -c '.decisions[] | [.origin,.scenario,.scope,.simulated,.type,.value]' <<<"${alert}" + assert_output --regexp "\[\"cscli\",\"manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX.*'\",\"Ip\",false,\"ban\",\"10.20.30.40\"\]" + run jq -c '.source' <<<"${alert}" + assert_json '{ip:"10.20.30.40",scope:"Ip",value:"10.20.30.40"}' +} + +@test "no active alerts" { + run -0 cscli alerts list --until 200d -o human + assert_output "No active alerts" + run -0 cscli alerts list --until 200d -o json + assert_output "null" + run -0 cscli alerts list --until 200d -o raw + assert_output "id,scope,value,reason,country,as,decisions,created_at" + run -0 cscli alerts list --until 200d -o raw --machine + assert_output "id,scope,value,reason,country,as,decisions,created_at,machine" +} + +@test "cscli alerts delete (by id)" { + run -0 cscli alerts delete --help + if [[ ! "$output" =~ "--id string" ]]; then + skip "cscli alerts delete --id not supported" + fi + + # make sure there is at least one alert + run -0 cscli decisions add -i 127.0.0.1 -d 1h -R crowdsecurity/test + # when testing with global config, alert id is not guaranteed to be 1. + # we'll just remove the first alert we find + run -0 --separate-stderr cscli alerts list -o json + run -0 jq -c '.[0].id' <(output) + ALERT_ID="$output" + + run -0 --separate-stderr cscli alerts delete --id "$ALERT_ID" + refute_output + assert_stderr --partial "1 alert(s) deleted" + + # can't delete twice + run -1 --separate-stderr cscli alerts delete --id "$ALERT_ID" + refute_output + assert_stderr --partial "Unable to delete alert" + assert_stderr --partial "API error: ent: alert not found" +} + +@test "cscli alerts delete (all)" { + run -0 --separate-stderr cscli alerts delete --all + assert_stderr --partial '0 alert(s) deleted' + + run -0 cscli decisions add -i 1.2.3.4 -d 1h -R crowdsecurity/test + run -0 cscli decisions add -i 1.2.3.5 -d 1h -R crowdsecurity/test + + run -0 --separate-stderr cscli alerts delete --all + assert_stderr --partial '2 alert(s) deleted' + + # XXX TODO: delete by scope, value, scenario, range.. +} + +@test "cscli alerts delete (with cascade to decisions)" { + run -0 cscli decisions add -i 1.2.3.4 + run -0 --separate-stderr cscli decisions list -o json + run -0 jq '. 
| length' <(output) + assert_output 1 + + run -0 --separate-stderr cscli alerts delete -i 1.2.3.4 + assert_stderr --partial 'alert(s) deleted' + run -0 --separate-stderr cscli decisions list -o json + assert_output null +} + +@test "cscli alerts delete (must ignore the query limit)" { + for i in $(seq 1 200); do + run -0 cscli decisions add -i 1.2.3.4 + done + run -0 --separate-stderr cscli alerts delete -i 1.2.3.4 + assert_stderr --partial '200 alert(s) deleted' +} + +@test "bad duration" { + skip 'TODO' + run -0 cscli decisions add -i 10.20.30.40 -t ban + run -9 --separate-stderr cscli decisions list --ip 10.20.30.40 -o json + run -9 jq -r '.[].decisions[].id' <(output) + DECISION_ID="${output}" + + ./instance-crowdsec stop + run -0 ./instance-db exec_sql "UPDATE decisions SET ... WHERE id=${DECISION_ID}" + ./instance-crowdsec start +} diff --git a/tests/bats/90_decisions.bats b/tests/bats/90_decisions.bats new file mode 100644 index 0000000..bcf3ebb --- /dev/null +++ b/tests/bats/90_decisions.bats @@ -0,0 +1,67 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +declare stderr + +#---------- + +@test "'decisions add' requires parameters" { + run -1 --separate-stderr cscli decisions add + assert_line "Usage:" + assert_stderr --partial "Missing arguments, a value is required (--ip, --range or --scope and --value)" + + run -1 --separate-stderr cscli decisions add -o json + run echo "${stderr}" + run -0 jq -c '[ .level, .msg]' <(output) + assert_output '["fatal","Missing arguments, a value is required (--ip, --range or --scope and --value)"]' +} + +@test "cscli decisions list, with and without --machine" { + is_db_postgres && skip + run -0 cscli decisions add -i 10.20.30.40 -t ban + + run -0 cscli decisions list + refute_output --partial 'Machine' + # machine name appears quoted in the "REASON" column + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + refute_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " + + run -0 cscli decisions list -m + assert_output --partial 'Machine' + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " + + run -0 cscli decisions list --machine + assert_output --partial 'Machine' + assert_output --regexp " 'githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?' " + assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? 
" +} + +@test "cscli decisions list, incorrect parameters" { + run -1 --separate-stderr cscli decisions list --until toto + assert_stderr --partial 'Unable to list decisions : performing request: API error: while parsing duration: time: invalid duration \"toto\"' + run -1 --separate-stderr cscli decisions list --until toto -o json + run echo "${stderr}" + run -0 jq -c '[.level, .msg]' <(output) + assert_output '["fatal","Unable to list decisions : performing request: API error: while parsing duration: time: invalid duration \"toto\""]' +} diff --git a/tests/bats/97_ipv4_single.bats b/tests/bats/97_ipv4_single.bats new file mode 100644 index 0000000..b3ee93e --- /dev/null +++ b/tests/bats/97_ipv4_single.bats @@ -0,0 +1,107 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + if is_db_mysql; then sleep 0.3; fi +} + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +#---------- + +@test "cli - first decisions list: must be empty" { + # delete community pull + run -0 cscli decisions delete --all + run -0 cscli decisions list -o json + assert_output 'null' +} + +@test "API - first decisions list: must be empty" { + run -0 api '/v1/decisions' + assert_output 'null' +} + +@test "adding decision for 1.2.3.4" { + run -0 cscli decisions add -i '1.2.3.4' + assert_output --partial 'Decision successfully added' +} + +@test "CLI - all decisions" { + run -0 cscli decisions list -o json + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '1.2.3.4' +} + +@test "API - all decisions" { + run -0 api '/v1/decisions' + run -0 jq -c '[ . 
| length, .[0].value ]' <(output) + assert_output '[1,"1.2.3.4"]' +} + +# check ip match + +@test "CLI - decision for 1.2.3.4" { + run -0 cscli decisions list -i '1.2.3.4' -o json + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '1.2.3.4' +} + +@test "API - decision for 1.2.3.4" { + run -0 api '/v1/decisions?ip=1.2.3.4' + run -0 jq -r '.[0].value' <(output) + assert_output '1.2.3.4' +} + +@test "CLI - decision for 1.2.3.5" { + run -0 cscli decisions list -i '1.2.3.5' -o json + assert_output 'null' +} + +@test "API - decision for 1.2.3.5" { + run -0 api '/v1/decisions?ip=1.2.3.5' + assert_output 'null' +} + +## check outer range match + +@test "CLI - decision for 1.2.3.0/24" { + run -0 cscli decisions list -r '1.2.3.0/24' -o json + assert_output 'null' +} + +@test "API - decision for 1.2.3.0/24" { + run -0 api '/v1/decisions?range=1.2.3.0/24' + assert_output 'null' +} + +@test "CLI - decisions where IP in 1.2.3.0/24" { + run -0 cscli decisions list -r '1.2.3.0/24' --contained -o json + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '1.2.3.4' +} + +@test "API - decisions where IP in 1.2.3.0/24" { + run -0 api '/v1/decisions?range=1.2.3.0/24&contains=false' + run -0 jq -r '.[0].value' <(output) + assert_output '1.2.3.4' +} diff --git a/tests/bats/97_ipv6_single.bats b/tests/bats/97_ipv6_single.bats new file mode 100644 index 0000000..e18099c --- /dev/null +++ b/tests/bats/97_ipv6_single.bats @@ -0,0 +1,155 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + if is_db_mysql; then sleep 0.3; fi +} + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +#---------- + +@test "cli - first decisions list: must be empty" { + # delete community pull + run -0 cscli decisions delete --all + run -0 cscli decisions list -o json + assert_output 'null' +} + +@test "adding decision for ip 1111:2222:3333:4444:5555:6666:7777:8888" { + run -0 cscli decisions add -i '1111:2222:3333:4444:5555:6666:7777:8888' + assert_output --partial 'Decision successfully added' +} + +@test "CLI - all decisions" { + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "API - all decisions" { + run -0 api "/v1/decisions" + run -0 jq -r '.[].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "CLI - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8888" { + run -0 cscli decisions list -i '1111:2222:3333:4444:5555:6666:7777:8888' -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8888" { + run -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' + run -0 jq -r '.[].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "CLI - decisions for ip 1211:2222:3333:4444:5555:6666:7777:8888" { + run -0 cscli decisions list -i '1211:2222:3333:4444:5555:6666:7777:8888' -o json + assert_output 'null' +} + +@test "API - decisions for ip 1211:2222:3333:4444:5555:6666:7777:8888" {
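+ # query an IP that differs from the banned one only in its first hextet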
+ run -0 api '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' + assert_output 'null' +} + +@test "CLI - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { + run -0 cscli decisions list -i '1111:2222:3333:4444:5555:6666:7777:8887' -o json + assert_output 'null' +} + +@test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { + run -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' + assert_output 'null' +} + +@test "CLI - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 cscli decisions list -r '1111:2222:3333:4444:5555:6666:7777:8888/48' -o json + assert_output 'null' +} + +@test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' + assert_output 'null' +} + +@test "CLI - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 cscli decisions list -r '1111:2222:3333:4444:5555:6666:7777:8888/48' --contained -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + run -0 jq -r '.[].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "CLI - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { + run -0 cscli decisions list -r '1111:2222:3333:4444:5555:6666:7777:8888/64' -o json + assert_output 'null' +} + +@test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { + run -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' + assert_output 'null' +} + +@test "CLI - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { + run -0 cscli decisions list -r '1111:2222:3333:4444:5555:6666:7777:8888/64' -o json --contained + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { + run -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&contains=false' + run -0 jq -r '.[].value' <(output) + assert_output '1111:2222:3333:4444:5555:6666:7777:8888' +} + +@test "adding decision for ip 1111:2222:3333:4444:5555:6666:7777:8889" { + run -0 cscli decisions add -i '1111:2222:3333:4444:5555:6666:7777:8889' + assert_output --partial 'Decision successfully added' +} + +@test "deleting decision for ip 1111:2222:3333:4444:5555:6666:7777:8889" { + run -0 cscli decisions delete -i '1111:2222:3333:4444:5555:6666:7777:8889' + assert_output --partial '1 decision(s) deleted' +} + +@test "CLI - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8889 after delete" { + run -0 cscli decisions list -i '1111:2222:3333:4444:5555:6666:7777:8889' -o json + assert_output 'null' +} + +@test "deleting decision for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { + run -0 cscli decisions delete -r '1111:2222:3333:4444:5555:6666:7777:8888/64' --contained + assert_output --partial '1 decision(s) deleted' +} + +@test "CLI - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64 after delete" { + run -0 cscli decisions list -r '1111:2222:3333:4444:5555:6666:7777:8888/64' -o json --contained + assert_output 'null' +} diff --git a/tests/bats/98_ipv4_range.bats b/tests/bats/98_ipv4_range.bats new file mode 100644 index 0000000..38b237e --- 
/dev/null +++ b/tests/bats/98_ipv4_range.bats @@ -0,0 +1,134 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + if is_db_mysql; then sleep 0.3; fi +} + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +#---------- + +@test "cli - first decisions list: must be empty" { + # delete community pull + run -0 cscli decisions delete --all + run -0 cscli decisions list -o json + assert_output 'null' +} + +@test "adding decision for range 4.4.4.0/24" { + run -0 cscli decisions add -r '4.4.4.0/24' + assert_output --partial 'Decision successfully added' +} + +@test "CLI - all decisions" { + run -0 cscli decisions list -o json + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "API - all decisions" { + run -0 api '/v1/decisions' + run -0 jq -r '.[0].value' <(output) + assert_output '4.4.4.0/24' +} + +# check ip within/outside of range + +@test "CLI - decisions for ip 4.4.4." { + run -0 cscli decisions list -i '4.4.4.3' -o json + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "API - decisions for ip 4.4.4." { + run -0 api '/v1/decisions?ip=4.4.4.3' + run -0 jq -r '.[0].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "CLI - decisions for ip contained in 4.4.4." { + run -0 cscli decisions list -i '4.4.4.4' -o json --contained + assert_output 'null' +} + +@test "API - decisions for ip contained in 4.4.4." { + run -0 api '/v1/decisions?ip=4.4.4.4&contains=false' + assert_output 'null' +} + +@test "CLI - decisions for ip 5.4.4." { + run -0 cscli decisions list -i '5.4.4.3' -o json + assert_output 'null' +} + +@test "API - decisions for ip 5.4.4." 
{ + run -0 api '/v1/decisions?ip=5.4.4.3' + assert_output 'null' +} + +@test "CLI - decisions for range 4.4.0.0/16" { + run -0 cscli decisions list -r '4.4.0.0/16' -o json + assert_output 'null' +} + +@test "API - decisions for range 4.4.0.0/16" { + run -0 api '/v1/decisions?range=4.4.0.0/16' + assert_output 'null' +} + +@test "CLI - decisions for ip/range in 4.4.0.0/16" { + run -0 cscli decisions list -r '4.4.0.0/16' -o json --contained + run -0 jq -r '.[0].decisions[0].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "API - decisions for ip/range in 4.4.0.0/16" { + run -0 api '/v1/decisions?range=4.4.0.0/16&contains=false' + run -0 jq -r '.[0].value' <(output) + assert_output '4.4.4.0/24' +} + +# check subrange + +@test "CLI - decisions for range 4.4.4.2/28" { + run -0 cscli decisions list -r '4.4.4.2/28' -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "API - decisions for range 4.4.4.2/28" { + run -0 api '/v1/decisions?range=4.4.4.2/28' + run -0 jq -r '.[].value' <(output) + assert_output '4.4.4.0/24' +} + +@test "CLI - decisions for range 4.4.3.2/28" { + run -0 cscli decisions list -r '4.4.3.2/28' -o json + assert_output 'null' +} + +@test "API - decisions for range 4.4.3.2/28" { + run -0 api '/v1/decisions?range=4.4.3.2/28' + assert_output 'null' +} diff --git a/tests/bats/98_ipv6_range.bats b/tests/bats/98_ipv6_range.bats new file mode 100644 index 0000000..857ae1a --- /dev/null +++ b/tests/bats/98_ipv6_range.bats @@ -0,0 +1,217 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + if is_db_mysql; then sleep 0.3; fi +} + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +#---------- + +@test "cli - first decisions list: must be empty" { + # delete community pull + run -0 cscli decisions delete --all + run -0 cscli decisions list -o json + assert_output 'null' +} + +@test "adding decision for range aaaa:2222:3333:4444::/64" { + run -0 cscli decisions add -r 'aaaa:2222:3333:4444::/64' + assert_output --partial 'Decision successfully added' +} + +@test "CLI - all decisions (2)" { + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "API - all decisions (2)" { + run -0 api '/v1/decisions' + run -0 jq -r '.[].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +# check ip within/out of range + +@test "CLI - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { + run -0 cscli decisions list -i 'aaaa:2222:3333:4444:5555:6666:7777:8888' -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "API - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { + run -0 api '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' + run -0 jq -r '.[].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "CLI - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" { + run -0 cscli decisions list -i 'aaaa:2222:3333:4445:5555:6666:7777:8888' -o json + assert_output 'null' +} + +@test "API - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" {
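+ # an IP in a different /64 (fourth hextet 4445 instead of 4444) must not match + run -0 api 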
'/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' + assert_output 'null' +} + +@test "CLI - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { + run -0 cscli decisions list -i 'aaa1:2222:3333:4444:5555:6666:7777:8887' -o json + assert_output 'null' +} + +@test "API - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { + run -0 api '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' + assert_output 'null' +} + +# check subrange within/out of range + +@test "CLI - decisions for range aaaa:2222:3333:4444:5555::/80" { + run -0 cscli decisions list -r 'aaaa:2222:3333:4444:5555::/80' -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "API - decisions for range aaaa:2222:3333:4444:5555::/80" { + run -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' + run -0 jq -r '.[].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "CLI - decisions for range aaaa:2222:3333:4441:5555::/80" { + run -0 cscli decisions list -r 'aaaa:2222:3333:4441:5555::/80' -o json + assert_output 'null' + +} + +@test "API - decisions for range aaaa:2222:3333:4441:5555::/80" { + run -0 api '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' + assert_output 'null' +} + +@test "CLI - decisions for range aaa1:2222:3333:4444:5555::/80" { + run -0 cscli decisions list -r 'aaa1:2222:3333:4444:5555::/80' -o json + assert_output 'null' +} + +@test "API - decisions for range aaa1:2222:3333:4444:5555::/80" { + run -0 api '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' + assert_output 'null' +} + +# check outer range + +@test "CLI - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 cscli decisions list -r 'aaaa:2222:3333:4444:5555:6666:7777:8888/48' -o json + assert_output 'null' +} + +@test "API - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' + assert_output 'null' +} + +@test "CLI - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 cscli decisions list -r 'aaaa:2222:3333:4444:5555:6666:7777:8888/48' -o json --contained + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "API - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + run -0 jq -r '.[].value' <(output) + assert_output 'aaaa:2222:3333:4444::/64' +} + +@test "CLI - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { + run -0 cscli decisions list -r 'aaaa:2222:3333:4445:5555:6666:7777:8888/48' -o json + assert_output 'null' +} + +@test "API - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { + run -0 api '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' + assert_output 'null' +} + +# bbbb:db8:: -> bbbb:db8:0000:0000:0000:7fff:ffff:ffff + +@test "adding decision for range bbbb:db8::/81" { + run -0 cscli decisions add -r 'bbbb:db8::/81' + assert_output --partial 'Decision successfully added' +} + +@test "CLI - decisions for ip bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { + run -0 cscli decisions list -o json -i 'bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'bbbb:db8::/81' +} + +@test "API - decisions for ip in bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { + run -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + run 
-0 jq -r '.[].value' <(output) + assert_output 'bbbb:db8::/81' +} + +@test "CLI - decisions for ip bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { + run -0 cscli decisions list -o json -i 'bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + assert_output 'null' +} + +@test "API - decisions for ip in bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { + run -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + assert_output 'null' +} + +@test "deleting decision for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { + run -0 cscli decisions delete -r 'aaaa:2222:3333:4444:5555:6666:7777:8888/48' --contained + assert_output --partial '1 decision(s) deleted' +} + +@test "CLI - decisions for range aaaa:2222:3333:4444::/64 after delete" { + run -0 cscli decisions list -o json -r 'aaaa:2222:3333:4444::/64' + assert_output 'null' +} + +@test "adding decision for ip bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { + run -0 cscli decisions add -i 'bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + assert_output --partial 'Decision successfully added' +} + +@test "adding decision for ip bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { + run -0 cscli decisions add -i 'bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + assert_output --partial 'Decision successfully added' +} + +@test "deleting decisions for range bbbb:db8::/81" { + run -0 cscli decisions delete -r 'bbbb:db8::/81' --contained + assert_output --partial '2 decision(s) deleted' +} + +@test "CLI - all decisions (3)" { + run -0 cscli decisions list -o json + run -0 jq -r '.[].decisions[0].value' <(output) + assert_output 'bbbb:db8:0000:0000:0000:8fff:ffff:ffff' +} diff --git a/tests/bats/99_lapi-stream-mode-scenario.bats b/tests/bats/99_lapi-stream-mode-scenario.bats new file mode 100644 index 0000000..e0862e4 --- /dev/null +++ b/tests/bats/99_lapi-stream-mode-scenario.bats @@ -0,0 +1,233 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + skip +} + +#---------- + +api() { + URI="$1" + curl -s -H "X-Api-Key:${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +output_new_decisions() { + jq -c '.new | map(select(.origin!="CAPI")) | .[] | del(.id) | (.. | .duration?) 
|= capture("(?[[:digit:]]+h[[:digit:]]+m)").d' <(output) | sort +} + + +@test "adding decisions with different duration, scenario, origin" { + # origin: test + run -0 cscli decisions add -i 127.0.0.1 -d 1h -R crowdsecurity/test + ./instance-crowdsec stop + run -0 ./instance-db exec_sql "update decisions set origin='test' where origin='cscli'" + ./instance-crowdsec start + + run -0 cscli decisions add -i 127.0.0.1 -d 3h -R crowdsecurity/ssh_bf + ./instance-crowdsec stop + run -0 ./instance-db exec_sql "update decisions set origin='another_origin' where origin='cscli'" + ./instance-crowdsec start + + run -0 cscli decisions add -i 127.0.0.1 -d 5h -R crowdsecurity/longest + run -0 cscli decisions add -i 127.0.0.2 -d 3h -R crowdsecurity/test + run -0 cscli decisions add -i 127.0.0.2 -d 3h -R crowdsecurity/ssh_bf + run -0 cscli decisions add -i 127.0.0.2 -d 1h -R crowdsecurity/ssh_bf + ./instance-crowdsec stop + run -0 ./instance-db exec_sql "update decisions set origin='test' where origin='cscli'" + ./instance-crowdsec start + + # origin: another_origin + run -0 cscli decisions add -i 127.0.0.2 -d 2h -R crowdsecurity/test + ./instance-crowdsec stop + run -0 ./instance-db exec_sql "update decisions set origin='another_origin' where origin='cscli'" + ./instance-crowdsec start +} + +@test "test startup" { + run -0 api "/v1/decisions/stream?startup=true" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with scenarios containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.2"} + EOT +} + +@test "test startup with multiple scenarios containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + EOT +} + +@test "test startup with unknown scenarios containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_containing=unknown" + assert_output '{"deleted":null,"new":null}' +} + +@test "test startup with scenarios containing and not containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + {"origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with scenarios containing and not containing 2" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" + run -0 output_new_decisions + assert_output - <<-EOT + 
{"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with scenarios not containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with multiple scenarios not containing" { + run -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with origins parameter" { + run -0 api "/v1/decisions/stream?startup=true&origins=another_origin" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"1h59m","origin":"another_origin","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with multiple origins parameter" { + run -0 api "/v1/decisions/stream?startup=true&origins=another_origin,test" + run -0 output_new_decisions + assert_output - <<-EOT + {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} + {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} + EOT +} + +@test "test startup with unknown origins" { + run -0 api "/v1/decisions/stream?startup=true&origins=unknown" + assert_output '{"deleted":null,"new":null}' +} + +#@test "delete decision 3 (127.0.0.1)" { +# +# { +# TestName: "delete decisions 3 (127.0.0.1)", +# Method: "DELETE", +# Route: "/v1/decisions/3", +# CheckCodeOnly: true, +# Code: 200, +# LenNew: 0, +# LenDeleted: 0, +# AuthType: PASSWORD, +# DelChecks: []DecisionCheck{}, +# NewChecks: []DecisionCheck{}, +# TestName: "check that 127.0.0.1 is not in deleted IP", +# Method: "GET", +# Route: "/v1/decisions/stream?startup=true", +# CheckCodeOnly: false, +# Code: 200, +# LenNew: 2, +# LenDeleted: 0, +# AuthType: APIKEY, +# DelChecks: []DecisionCheck{}, +# NewChecks: []DecisionCheck{}, +# }, +# { +# TestName: "delete decisions 2 (127.0.0.1)", +# Method: "DELETE", +# Route: "/v1/decisions/2", +# CheckCodeOnly: true, +# Code: 200, +# LenNew: 0, +# LenDeleted: 0, +# AuthType: PASSWORD, +# DelChecks: []DecisionCheck{}, +# NewChecks: []DecisionCheck{}, +# }, +# { +# TestName: "check that 127.0.0.1 is not in deleted IP", +# Method: "GET", +# Route: "/v1/decisions/stream?startup=true", +# CheckCodeOnly: false, +# Code: 200, +# LenNew: 2, +# LenDeleted: 0, +# AuthType: APIKEY, +# DelChecks: []DecisionCheck{}, +# NewChecks: []DecisionCheck{}, +# }, +# { +# TestName: "delete decisions 1 (127.0.0.1)", +# Method: "DELETE", +# Route: "/v1/decisions/1", +# CheckCodeOnly: true, +# Code: 200, +# LenNew: 0, +# LenDeleted: 0, +# AuthType: PASSWORD, +# DelChecks: []DecisionCheck{}, +# NewChecks: []DecisionCheck{}, +# }, +# TestName: "127.0.0.1 should be in deleted now", +# Method: "GET", +# Route: "/v1/decisions/stream?startup=true", +# CheckCodeOnly: false, +# Code: 200, +# LenNew: 1, +# LenDeleted: 1, +# 
AuthType: APIKEY, +# DelChecks: []DecisionCheck{ +# { +# ID: int64(1), +# Origin: "test", +# Scenario: "crowdsecurity/test", +# Value: "127.0.0.1", +# Duration: "-", // we check that the time is negative +# }, +# }, +# NewChecks: []DecisionCheck{}, +# }, +#} + diff --git a/tests/bats/99_lapi-stream-mode-scopes.bats b/tests/bats/99_lapi-stream-mode-scopes.bats new file mode 100644 index 0000000..60dd2e6 --- /dev/null +++ b/tests/bats/99_lapi-stream-mode-scopes.bats @@ -0,0 +1,64 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" +} + +#---------- + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +@test "adding decisions for multiple scopes" { + run -0 cscli decisions add -i '1.2.3.6' + assert_output --partial 'Decision successfully added' + run -0 cscli decisions add --scope user --value toto + assert_output --partial 'Decision successfully added' +} + +@test "stream start (implicit ip scope)" { + run -0 api "/v1/decisions/stream?startup=true" + run -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.6' + refute_output --partial 'toto' +} + +@test "stream start (explicit ip scope)" { + run -0 api "/v1/decisions/stream?startup=true&scopes=ip" + run -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.6' + refute_output --partial 'toto' +} + +@test "stream start (user scope)" { + run -0 api "/v1/decisions/stream?startup=true&scopes=user" + run -0 jq -r '.new' <(output) + refute_output --partial '1.2.3.6' + assert_output --partial 'toto' +} + +@test "stream start (user+ip scope)" { + run -0 api "/v1/decisions/stream?startup=true&scopes=user,ip" + run -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.6' + assert_output --partial 'toto' +} diff --git a/tests/bats/99_lapi-stream-mode.bats b/tests/bats/99_lapi-stream-mode.bats new file mode 100644 index 0000000..082dbaf --- /dev/null +++ b/tests/bats/99_lapi-stream-mode.bats @@ -0,0 +1,73 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + CROWDSEC_API_URL="http://localhost:8080" + export CROWDSEC_API_URL +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" +} + +#---------- + +api() { + URI="$1" + curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" +} + +@test "adding decisions for multiple ips" { + run -0 cscli decisions add -i '1111:2222:3333:4444:5555:6666:7777:8888' + run -0 cscli decisions add -i '1.2.3.4' + run -0 cscli decisions add -r '1.2.4.0/24' + assert_output --partial 'Decision successfully added' +} + +@test "stream start" { + run -0 api "/v1/decisions/stream?startup=true" + if is_db_mysql; then sleep 3; fi + run -0 jq -r '.new' <(output) + assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' + assert_output --partial '1.2.3.4' + assert_output --partial '1.2.4.0/24' +} + +@test "stream cont (add)" { + run -0 cscli decisions add -i '1.2.3.5' + if is_db_mysql; then sleep 3; fi + run -0 api "/v1/decisions/stream" + run -0 jq -r '.new' <(output) + 
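# the stream delta should only contain the decision added since the last pull + 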
assert_output --partial '1.2.3.5' +} + +@test "stream cont (del)" { + run -0 cscli decisions delete -i '1.2.3.4' + if is_db_mysql; then sleep 3; fi + run -0 api "/v1/decisions/stream" + run -0 jq -r '.deleted' <(output) + assert_output --partial '1.2.3.4' +} + +@test "stream restart" { + run -0 api "/v1/decisions/stream?startup=true" + api_out=${output} + run -0 jq -r '.deleted' <(output) + assert_output --partial '1.2.3.4' + output=${api_out} + run -0 jq -r '.new' <(output) + assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' + assert_output --partial '1.2.3.5' + assert_output --partial '1.2.4.0/24' +} diff --git a/tests/bats/reformat b/tests/bats/reformat new file mode 100755 index 0000000..fe31fe9 --- /dev/null +++ b/tests/bats/reformat @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# from https://github.com/bats-core/bats-core/issues/192#issuecomment-528315083 +# thanks Sean Leather + +# Rewrite the Bats scripts in-place to look more like Bash scripts to shfmt +perl -pi -e 's/^(\@test.*) \{$/$1\n{/' ./*.bats + +tmpfile=$(mktemp) +for file in *bats; do + shfmt -i 4 -ln bash -s "${file}" > "${tmpfile}" + mv "${tmpfile}" "${file}" +done +rm -f "${tmpfile}" + +# Undo the changes to the Bats scripts in-place so that they work with Bats +perl -pi -e 's/^\{\R//; s/(\@test.*$)/$1 {/' ./*.bats diff --git a/tests/bats/testdata/cfssl/agent.json b/tests/bats/testdata/cfssl/agent.json new file mode 100644 index 0000000..693e3aa --- /dev/null +++ b/tests/bats/testdata/cfssl/agent.json @@ -0,0 +1,16 @@ +{ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "agent-ou", + "ST": "France" + } + ] + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/agent_invalid.json b/tests/bats/testdata/cfssl/agent_invalid.json new file mode 100644 index 0000000..c61d4de --- /dev/null +++ b/tests/bats/testdata/cfssl/agent_invalid.json @@ -0,0 +1,16 @@ +{ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "this-is-not-the-ou-youre-looking-for", + "ST": "France" + } + ] + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/bouncer.json b/tests/bats/testdata/cfssl/bouncer.json new file mode 100644 index 0000000..9a07f57 --- /dev/null +++ b/tests/bats/testdata/cfssl/bouncer.json @@ -0,0 +1,16 @@ +{ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "bouncer-ou", + "ST": "France" + } + ] + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/bouncer_invalid.json b/tests/bats/testdata/cfssl/bouncer_invalid.json new file mode 100644 index 0000000..c61d4de --- /dev/null +++ b/tests/bats/testdata/cfssl/bouncer_invalid.json @@ -0,0 +1,16 @@ +{ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "this-is-not-the-ou-youre-looking-for", + "ST": "France" + } + ] + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/ca.json b/tests/bats/testdata/cfssl/ca.json new file mode 100644 index 0000000..ed907e0 --- /dev/null +++ b/tests/bats/testdata/cfssl/ca.json @@ -0,0 +1,16 @@ +{ + "CN": "CrowdSec Test CA", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "Crowdsec", + "ST": "France" + } + ] +} \ No newline at end of file diff --git 
a/tests/bats/testdata/cfssl/intermediate.json b/tests/bats/testdata/cfssl/intermediate.json new file mode 100644 index 0000000..3996ce6 --- /dev/null +++ b/tests/bats/testdata/cfssl/intermediate.json @@ -0,0 +1,19 @@ +{ + "CN": "CrowdSec Test CA Intermediate", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "Crowdsec Intermediate", + "ST": "France" + } + ], + "ca": { + "expiry": "42720h" + } + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/profiles.json b/tests/bats/testdata/cfssl/profiles.json new file mode 100644 index 0000000..d0dfced --- /dev/null +++ b/tests/bats/testdata/cfssl/profiles.json @@ -0,0 +1,44 @@ +{ + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "intermediate_ca": { + "usages": [ + "signing", + "digital signature", + "key encipherment", + "cert sign", + "crl sign", + "server auth", + "client auth" + ], + "expiry": "8760h", + "ca_constraint": { + "is_ca": true, + "max_path_len": 0, + "max_path_len_zero": true + } + }, + "server": { + "usages": [ + "signing", + "digital signature", + "key encipherment", + "server auth" + ], + "expiry": "8760h" + }, + "client": { + "usages": [ + "signing", + "digital signature", + "key encipherment", + "client auth" + ], + "expiry": "8760h" + } + } + } + } \ No newline at end of file diff --git a/tests/bats/testdata/cfssl/server.json b/tests/bats/testdata/cfssl/server.json new file mode 100644 index 0000000..3701825 --- /dev/null +++ b/tests/bats/testdata/cfssl/server.json @@ -0,0 +1,20 @@ +{ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + "O": "Crowdsec", + "OU": "Crowdsec Server", + "ST": "France" + } + ], + "hosts": [ + "127.0.0.1", + "localhost" + ] + } \ No newline at end of file diff --git a/tests/bin/assert-crowdsec-not-running b/tests/bin/assert-crowdsec-not-running new file mode 100755 index 0000000..c6f381a --- /dev/null +++ b/tests/bin/assert-crowdsec-not-running @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +is_crowdsec_running() { + PIDS=$(pgrep -x 'crowdsec|crowdsec.test|crowdsec.cover') +} + +# The process can be slow, especially on CI and during test coverage. +# Give it some time, maybe it's quitting soon. +for _i in {1..10}; do + is_crowdsec_running || exit 0 + sleep .5 +done + +PIDS=$(echo "${PIDS}" | tr '\n' ' ') +msg="CrowdSec is already running (PID ${PIDS}). Please terminate it and run the tests again." + +# Are we inside a setup() or @test? Is file descriptor 3 open? +if { true >&3; } 2>/dev/null; then + echo "${msg}" >&3 +else + echo "${msg}" >&2 +fi + +# cause the calling setup() or @test to fail +exit 1 diff --git a/tests/bin/check-requirements b/tests/bin/check-requirements new file mode 100755 index 0000000..f6889fc --- /dev/null +++ b/tests/bin/check-requirements @@ -0,0 +1,110 @@ +#!/usr/bin/env bash + +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck source=../.environment.sh +. "${THIS_DIR}/../.environment.sh" + +check_bats_core() { + if ! "${TEST_DIR}/lib/bats-core/bin/bats" --version >/dev/null 2>&1; then + die "ERROR: the bats-* submodules are required. Please run 'git submodule init; git submodule update' and retry." + fi +} + +check_curl() { + if ! command -v curl >/dev/null; then + die "missing required program 'curl'" + fi +} + +check_python3() { + if ! 
command -v python3 >/dev/null; then + die "missing required program 'python3'" + fi +} + +check_jq() { + if ! command -v jq >/dev/null; then + die "Missing required program 'jq'" + fi +} + +check_nc() { + if ! command -v nc >/dev/null; then + die "missing required program 'nc' (package 'netcat-openbsd')" + fi +} + +check_base64() { + if ! command -v base64 >/dev/null; then + die "missing required program 'base64'" + fi +} + +check_pkill() { + if ! command -v pkill >/dev/null; then + die "missing required program 'pkill'" + fi +} + +check_yq() { + # shellcheck disable=SC2016 + howto_install='You can install it with your favorite package manager (including snap) or with "go install github.com/mikefarah/yq/v4@latest" and add ~/go/bin to $PATH.' + if ! command -v yq >/dev/null; then + die "Missing required program 'yq'. ${howto_install}" + fi + if ! (yq --version | grep mikefarah >/dev/null); then + die "yq exists but it's not the one we need (mikefarah/yq). ${howto_install}" + fi +} + +check_daemonizer() { + if ! command -v daemonize >/dev/null; then + die "missing required program 'daemonize' (package 'daemonize' or 'https://github.com/bmc/daemonize')" + fi +} + +check_cfssl() { + # shellcheck disable=SC2016 + howto_install='You can install it with "go install github.com/cloudflare/cfssl/cmd/cfssl@latest" and add ~/go/bin to $PATH.' + if ! command -v cfssl >/dev/null; then + die "Missing required program 'cfssl'. ${howto_install}" + fi +} + +check_cfssljson() { + # shellcheck disable=SC2016 + howto_install='You can install it with "go install github.com/cloudflare/cfssl/cmd/cfssljson@latest" and add ~/go/bin to $PATH.' + if ! command -v cfssljson >/dev/null; then + die "Missing required program 'cfssljson'. ${howto_install}" + fi +} + +check_gocovmerge() { + if ! command -v gocovmerge >/dev/null; then + die "missing required program 'gocovmerge'. You can install it with \"go install github.com/wadey/gocovmerge@latest\"" + fi +} + +check_bats_core +check_curl +check_daemonizer +check_cfssl +check_cfssljson +check_jq +check_nc +check_base64 +check_python3 +check_yq +check_pkill +if [[ -n "${TEST_COVERAGE}" ]]; then + check_gocovmerge +fi + diff --git a/tests/bin/collect-hub-coverage b/tests/bin/collect-hub-coverage new file mode 100755 index 0000000..05c4e06 --- /dev/null +++ b/tests/bin/collect-hub-coverage @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck disable=SC1091 +. 
"${THIS_DIR}/../.environment.sh" + +hubdir="${LOCAL_DIR}/hub-tests" + +coverage() { + "${CSCLI}" --crowdsec "${CROWDSEC}" --cscli "${CSCLI}" hubtest coverage --"$1" --percent +} + +cd "${hubdir}" || die "Could not find hub test results" + +shopt -s inherit_errexit + +echo "PARSERS_COV=$(coverage parsers | cut -d = -f2)" +echo "SCENARIOS_COV=$(coverage scenarios | cut -d = -f2)" + +PARSERS_COV_NUMBER=$(coverage parsers | tr -d '%[[:space:]]') +SCENARIOS_COV_NUMBER=$(coverage scenarios | tr -d '%[[:space:]]') + +echo "PARSERS_BADGE_COLOR=$(if [[ PARSERS_COV_NUMBER -lt 70 ]]; then echo 'red'; else echo 'green'; fi)" +echo "SCENARIOS_BADGE_COLOR=$(if [[ SCENARIOS_COV_NUMBER -lt 70 ]]; then echo 'red'; else echo 'green'; fi)" diff --git a/tests/bin/crowdsec-wrapper b/tests/bin/crowdsec-wrapper new file mode 100755 index 0000000..8477076 --- /dev/null +++ b/tests/bin/crowdsec-wrapper @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -eu + +# +# Delegate operations to an instrumented binary and collects coverage data. +# + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# no need to change directory, and doing it here would break hub tests +# shellcheck disable=SC1091 +. "${THIS_DIR}/../.environment.sh" + +set -o pipefail # don't let sed hide the statuscode +mkdir -p "${LOCAL_DIR}/var/lib/coverage" + +# this would be nice but doesn't work, since the binary is not running in background +#_hup() { +# echo "pkill -1 crowdsec.cover" +# pkill -HUP crowdsec.cover +#} +# +## relay the "configuration reload" signal +#trap _hup SIGHUP + +# we collect rc and output by hand, because setting -o pipefail would trigger a +# SIGPIPE. +set +e + +# Arguments to crowdsec are passed through a temporary, newline-delimited +# file courtesy of github.com/confluentinc/bincover. Coverage data will be +# merged at the end of the test run. +# The '=' between flags and values is required. +output=$("${BIN_DIR}/crowdsec.cover" \ + -test.run="^TestBincoverRunMain$" \ + -test.coverprofile="${LOCAL_DIR}/var/lib/coverage/crowdsec-$(date +'%s')-$$-${RANDOM}.out" \ + -args-file=<(for i; do echo "${i}"; done)) +rc=$? + +# If there is bincover metadata, we take the status code from there. Otherwise, +# we keep the status from the above command. +if [[ ${output} =~ (.*)(START_BINCOVER_METADATA[[:space:]]*)(.*)([[:space:]]END_BINCOVER_METADATA) ]]; then + echo -n "${BASH_REMATCH[1]}" + exit "$(jq '.exit_code' <<< "${BASH_REMATCH[3]}")" +fi + +echo -n "${output}" +exit "${rc}" diff --git a/tests/bin/cscli-wrapper b/tests/bin/cscli-wrapper new file mode 100755 index 0000000..58831c5 --- /dev/null +++ b/tests/bin/cscli-wrapper @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -eu + +# +# Delegate operations to an instrumented binary and collects coverage data. +# + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# no need to change directory, and doing it here would break hub tests +# shellcheck disable=SC1091 +. "${THIS_DIR}/../.environment.sh" + +set -o pipefail # don't let sed hide the statuscode +mkdir -p "${LOCAL_DIR}/var/lib/coverage" + +# we collect rc and output by hand, because setting -o pipefail would trigger a +# SIGPIPE. +set +e + +# Arguments to cscli are passed through a temporary, newline-delimited +# file courtesy of github.com/confluentinc/bincover. Coverage data will be +# merged at the end of the test run. +# The '=' between flags and values is required. 
+output=$("${BIN_DIR}/cscli.cover" \
+    -test.run="^TestBincoverRunMain$" \
+    -test.coverprofile="${LOCAL_DIR}/var/lib/coverage/cscli-$(date +'%s')-$$-${RANDOM}.out" \
+    -args-file=<(for i; do echo "${i}"; done))
+rc=$?
+
+# If there is bincover metadata, we take the status code from there. Otherwise,
+# we keep the status from the above command.
+if [[ ${output} =~ (.*)(START_BINCOVER_METADATA[[:space:]]*)(.*)([[:space:]]END_BINCOVER_METADATA) ]]; then
+    echo -n "${BASH_REMATCH[1]}"
+    exit "$(jq '.exit_code' <<< "${BASH_REMATCH[3]}")"
+fi
+
+echo -n "${output}"
+exit "${rc}"
diff --git a/tests/bin/generate-hub-tests b/tests/bin/generate-hub-tests
new file mode 100755
index 0000000..2103128
--- /dev/null
+++ b/tests/bin/generate-hub-tests
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+set -eu
+
+# shellcheck disable=SC1007
+THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
+# shellcheck disable=SC1091
+. "${THIS_DIR}/../.environment.sh"
+
+cscli() {
+    "${CSCLI}" "$@"
+}
+
+"${TEST_DIR}/instance-data" load
+
+hubdir="${LOCAL_DIR}/hub-tests"
+git clone --depth 1 https://github.com/crowdsecurity/hub.git "${hubdir}" >/dev/null 2>&1 || (cd "${hubdir}"; git pull)
+
+HUBTESTS_BATS="${TEST_DIR}/dyn-bats/hub.bats"
+
+cat << EOT > "${HUBTESTS_BATS}"
+set -u
+
+setup_file() {
+    load "../lib/setup_file.sh"
+}
+
+teardown_file() {
+    load "../lib/teardown_file.sh"
+}
+
+setup() {
+    load "../lib/setup.sh"
+}
+
+EOT
+
+echo "Generating hub tests..."
+
+for testname in $("${CSCLI}" --crowdsec "${CROWDSEC}" --cscli "${CSCLI}" hubtest --hub "${hubdir}" list -o json | jq -r '.[] | .Name'); do
+    cat << EOT >> "${HUBTESTS_BATS}"
+
+@test "${testname}" {
+    run "\${CSCLI}" --crowdsec "\${CROWDSEC}" --cscli "\${CSCLI}" --hub "${hubdir}" hubtest run "${testname}" --clean
+    # in case of error, need to see what went wrong
+    echo "\$output"
+    assert_success
+}
+EOT
done
diff --git a/tests/bin/mock-http.py b/tests/bin/mock-http.py
new file mode 100644
index 0000000..3f26271
--- /dev/null
+++ b/tests/bin/mock-http.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+import json
+import logging
+import sys
+
+from http.server import HTTPServer, BaseHTTPRequestHandler
+
+class RequestHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        request_path = self.path
+        request_body = self.rfile.read(int(self.headers['Content-Length']))
+        request_body = json.loads(request_body.decode())
+        log = {
+            "path": request_path,
+            "status": 200,
+            "request_body": request_body,
+        }
+        print(json.dumps(log))
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        self.wfile.write(json.dumps({}).encode())
+        self.wfile.flush()
+        return
+
+    def log_message(self, format, *args):
+        return
+
+def main(argv):
+    try:
+        port = int(argv[1])
+    except IndexError:
+        logging.fatal("Missing port number")
+        return 1
+    except ValueError:
+        logging.fatal("Invalid port number '%s'", argv[1])
+        return 1
+    server = HTTPServer(('', port), RequestHandler)
+    # logging.info('Listening on port %s', port)
+    server.serve_forever()
+    return 0
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    sys.exit(main(sys.argv))
diff --git a/tests/bin/wait-for-port b/tests/bin/wait-for-port
new file mode 100755
index 0000000..4c6c55b
--- /dev/null
+++ b/tests/bin/wait-for-port
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -eu
+
+script_name=$0
+
+die() {
+    echo >&2 "$@"
+    exit 1
+}
+
+about() {
+    die "usage: ${script_name} [-q] <port>"
+}
+
+[[ $# -lt 1 ]] && about
+
+QUIET=
+if [[ "$1" == "-q" ]]; then
+    QUIET=quiet
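+    # drop the -q flag so that only the port number remains in $1
+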
shift +fi + +[[ $# -lt 1 ]] && about + +port_number=$1 + +# 4 seconds may seem long, but the tests must work on embedded, slow arm boxes too +for _ in $(seq 40); do + nc -z localhost "${port_number}" >/dev/null 2>&1 && exit 0 + sleep .1 +done + +# send to &3 if open +if { true >&3; } 2>/dev/null; then + [[ -z "${QUIET}" ]] && echo "Can't connect to port ${port_number}" >&3 +else + [[ -z "${QUIET}" ]] && echo "Can't connect to port ${port_number}" >&2 +fi + +exit 1 + diff --git a/tests/disable-capi b/tests/disable-capi new file mode 100755 index 0000000..f19bef5 --- /dev/null +++ b/tests/disable-capi @@ -0,0 +1,8 @@ +#!/bin/bash + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck disable=SC1091 +. "${THIS_DIR}/.environment.sh" + +yq e 'del(.api.server.online_client)' -i "${CONFIG_YAML}" diff --git a/tests/dyn-bats/README.md b/tests/dyn-bats/README.md new file mode 100644 index 0000000..1e4dec1 --- /dev/null +++ b/tests/dyn-bats/README.md @@ -0,0 +1,2 @@ +This directory is for dynamically generated tests. Do not commit them. +Any `*.bats` file here will be removed by the Makefile. diff --git a/tests/enable-capi b/tests/enable-capi new file mode 100755 index 0000000..ddbf876 --- /dev/null +++ b/tests/enable-capi @@ -0,0 +1,11 @@ +#!/bin/bash + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck disable=SC1091 +. "${THIS_DIR}/.environment.sh" + +online_api_credentials="$(dirname "${CONFIG_YAML}")/online_api_credentials.yaml" +export online_api_credentials + +yq e '.api.server.online_client.credentials_path=strenv(online_api_credentials)' -i "${CONFIG_YAML}" diff --git a/tests/instance-crowdsec b/tests/instance-crowdsec new file mode 100755 index 0000000..d87145c --- /dev/null +++ b/tests/instance-crowdsec @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}" || exit 1 +# shellcheck disable=SC1091 +. ./.environment.sh + +backend_script="./lib/init/crowdsec-${INIT_BACKEND}" + +if [[ ! -x "${backend_script}" ]]; then + echo "unknown init system '${INIT_BACKEND}'" >&2 + exit 1 +fi + +exec "${backend_script}" "$@" diff --git a/tests/instance-data b/tests/instance-data new file mode 100755 index 0000000..02742b4 --- /dev/null +++ b/tests/instance-data @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}" || exit 1 +# shellcheck disable=SC1091 +. ./.environment.sh + +backend_script="./lib/config/config-${CONFIG_BACKEND}" + +if [[ ! -x "${backend_script}" ]]; then + echo "unknown config backend '${CONFIG_BACKEND}'" >&2 + exit 1 +fi + +exec "${backend_script}" "$@" diff --git a/tests/instance-db b/tests/instance-db new file mode 100755 index 0000000..fbbc18d --- /dev/null +++ b/tests/instance-db @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}" || exit 1 +# shellcheck disable=SC1091 +. ./.environment.sh + +./bin/assert-crowdsec-not-running + +backend_script="./lib/db/instance-${DB_BACKEND}" + +if [[ ! 
-x "${backend_script}" ]]; then + echo "unknown database '${DB_BACKEND}'" >&2 + exit 1 +fi + +exec "${backend_script}" "$@" diff --git a/tests/instance-mock-http b/tests/instance-mock-http new file mode 100755 index 0000000..cca19b7 --- /dev/null +++ b/tests/instance-mock-http @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + +about() { + die "usage: $0 [ start | stop ]" +} + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}" +# shellcheck disable=SC1091 +. ./.environment.sh + +# you have not removed set -u above, have you? + +[[ -z "${LOG_DIR-}" ]] && die "\$LOG_DIR must be defined." +[[ -z "${PID_DIR-}" ]] && die "\$PID_DIR must be defined." + +if ! command -v python3 >/dev/null 2>&2; then + die "The python3 executable is is missing. Please install it and try again." +fi + +DAEMON_PID=${PID_DIR}/mock-http.pid + +start_instance() { + [[ $# -lt 1 ]] && about + daemonize \ + -p "${DAEMON_PID}" \ + -e "${LOG_DIR}/mock-http.err" \ + -o "${LOG_DIR}/mock-http.out" \ + /usr/bin/env python3 -u "${THIS_DIR}/bin/mock-http.py" "$1" + ./bin/wait-for-port "$1" +# echo "mock http started on port $1" +} + +stop_instance() { + if [[ -f "${DAEMON_PID}" ]]; then + # terminate with extreme prejudice, all the application data will be thrown away anyway + kill -9 "$(cat "${DAEMON_PID}")" > /dev/null 2>&1 + rm -f -- "${DAEMON_PID}" + fi +} + + +# --------------------------- + +[[ $# -lt 1 ]] && about + +case "$1" in + start) + shift + start_instance "$@" + ;; + stop) + stop_instance + ;; + *) + about + ;; +esac; + diff --git a/tests/lib/config/config-global b/tests/lib/config/config-global new file mode 100755 index 0000000..c2f0767 --- /dev/null +++ b/tests/lib/config/config-global @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +set -eu +script_name=$0 + +die() { + echo >&2 "$@" + exit 1 +} + +about() { + die "usage: ${script_name} [make | load | clean]" +} + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}"/../../ +#shellcheck disable=SC1091 +. ./.environment.sh + +# you have not removed set -u above, have you? + +[[ -z "${TEST_DIR-}" ]] && die "\$TEST_DIR must be defined." +[[ -z "${LOCAL_DIR-}" ]] && die "\$LOCAL_DIR must be defined." +[[ -z "${CSCLI-}" ]] && die "\$CSCLI must be defined." +[[ -z "${LOCAL_INIT_DIR-}" ]] && die "\$LOCAL_INIT_DIR must be defined." +[[ -z "${PLUGIN_DIR-}" ]] && die "\$PLUGIN_DIR must be defined." +[[ -z "${DB_BACKEND-}" ]] && die "\$DB_BACKEND must be defined." + +if [[ ! -f "${CSCLI}" ]]; then + die "${CSCLI} is missing. Please build (with 'make bats-build') or install it." +fi + +REL_CONFIG_DIR="etc/crowdsec" +REL_DATA_DIR="var/lib/crowdsec/data" + +DATA_DIR="${LOCAL_DIR}/${REL_DATA_DIR}" +export DATA_DIR +CONFIG_DIR="${LOCAL_DIR}/${REL_CONFIG_DIR}" +export CONFIG_DIR + +if [[ $(uname) == "OpenBSD" ]]; then + TAR=gtar +else + TAR=tar +fi + +remove_init_data() { + ./bin/assert-crowdsec-not-running || die "Cannot remove fixture data." + rm -rf -- "${LOCAL_DIR:?}/${REL_CONFIG_DIR}"/* "${LOCAL_DIR:?}/${REL_DATA_DIR:?}"/* +} + +# we need a separate function for initializing config when testing package +# because we want to test the configuration as well +make_init_data() { + ./bin/assert-crowdsec-not-running || die "Cannot create fixture data." 
+
+    ./instance-db config-yaml
+    ./instance-db setup
+
+    # when installed from packages, crowdsec always uses sqlite, so there is
+    # no need to regenerate local credentials for it
+
+    [[ "${DB_BACKEND}" == "sqlite" ]] || ${CSCLI} machines add --auto
+
+    mkdir -p "${LOCAL_INIT_DIR}"
+
+    ./instance-db dump "${LOCAL_INIT_DIR}/database"
+
+    echo "${DB_BACKEND}" > "${LOCAL_INIT_DIR}/.backend"
+
+    # disable CAPI by default
+    yq e 'del(.api.server.online_client)' -i "${CONFIG_DIR}/config.yaml"
+
+    "${TAR}" -C "${LOCAL_DIR}" --create \
+        --exclude "${REL_DATA_DIR}"/crowdsec.db \
+        --file "${LOCAL_INIT_DIR}/init-config-data.tar" "${REL_CONFIG_DIR}" "${REL_DATA_DIR}"
+}
+
+load_init_data() {
+    ./bin/assert-crowdsec-not-running || die "Cannot load fixture data."
+
+    if [[ ! -f "${LOCAL_INIT_DIR}/init-config-data.tar" ]]; then
+        die "Initial data not found; did you run '${script_name} make'?"
+    fi
+
+    dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")"
+    if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then
+        die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'"
+    fi
+
+    remove_init_data
+
+    "${TAR}" -C "${LOCAL_DIR}" --extract --file "${LOCAL_INIT_DIR}/init-config-data.tar"
+
+    ./instance-db restore "${LOCAL_INIT_DIR}/database"
+}
+
+
+# ---------------------------
+
+[[ $# -lt 1 ]] && about
+
+case "$1" in
+    make)
+        "${TEST_DIR}/instance-crowdsec" stop
+        make_init_data
+        ;;
+    load)
+        load_init_data
+        ;;
+    clean)
+        remove_init_data
+        ;;
+    *)
+        about
+        ;;
+esac;
+
diff --git a/tests/lib/config/config-local b/tests/lib/config/config-local
new file mode 100755
index 0000000..7d84ac7
--- /dev/null
+++ b/tests/lib/config/config-local
@@ -0,0 +1,174 @@
+#!/usr/bin/env bash
+
+set -eu
+script_name=$0
+
+die() {
+    echo >&2 "$@"
+    exit 1
+}
+
+about() {
+    die "usage: ${script_name} [make | load | clean]"
+}
+
+#shellcheck disable=SC1007
+THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
+cd "${THIS_DIR}"/../../
+#shellcheck disable=SC1091
+. ./.environment.sh
+
+# you have not removed set -u above, have you?
+
+[[ -z "${TEST_DIR-}" ]] && die "\$TEST_DIR must be defined."
+[[ -z "${LOCAL_DIR-}" ]] && die "\$LOCAL_DIR must be defined."
+[[ -z "${CSCLI-}" ]] && die "\$CSCLI must be defined."
+[[ -z "${LOCAL_INIT_DIR-}" ]] && die "\$LOCAL_INIT_DIR must be defined."
+[[ -z "${PLUGIN_DIR-}" ]] && die "\$PLUGIN_DIR must be defined."
+[[ -z "${DB_BACKEND-}" ]] && die "\$DB_BACKEND must be defined."
+
+if [[ ! -f "${CSCLI}" ]]; then
+    die "${CSCLI} is missing. Please build (with 'make bats-build') or install it."
+fi
+
+REL_CONFIG_DIR="etc/crowdsec"
+REL_DATA_DIR="var/lib/crowdsec/data"
+
+DATA_DIR="${LOCAL_DIR}/${REL_DATA_DIR}"
+export DATA_DIR
+CONFIG_DIR="${LOCAL_DIR}/${REL_CONFIG_DIR}"
+export CONFIG_DIR
+HUB_DIR="${CONFIG_DIR}/hub"
+export HUB_DIR
+
+if [[ $(uname) == "OpenBSD" ]]; then
+    TAR=gtar
+else
+    TAR=tar
+fi
+
+remove_init_data() {
+    ./bin/assert-crowdsec-not-running || die "Cannot remove fixture data."
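+    # empty the etc/crowdsec and var/lib/crowdsec/data trees, but keep the
+    # directories themselves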
+ rm -rf -- "${LOCAL_DIR:?}/${REL_CONFIG_DIR}"/* "${LOCAL_DIR:?}/${REL_DATA_DIR:?}"/* +} + +config_generate() { + mkdir -p "${CONFIG_DIR}" + + cp ../config/profiles.yaml \ + ../config/simulation.yaml \ + ../config/local_api_credentials.yaml \ + ../config/online_api_credentials.yaml \ + "${CONFIG_DIR}/" + + # the default acquis file contains files that are not readable by everyone + touch "$LOG_DIR/empty.log" + cat <<-EOT >"$CONFIG_DIR/acquis.yaml" + source: file + filenames: + - $LOG_DIR/empty.log + labels: + type: syslog + EOT + + cp ../plugins/notifications/*/{http,email,slack,splunk,dummy}.yaml \ + "${CONFIG_DIR}/notifications/" + + yq e ' + .common.daemonize=true | + del(.common.pid_dir) | + .common.log_level="info" | + .common.force_color_logs=true | + .common.log_dir=strenv(LOG_DIR) | + .config_paths.config_dir=strenv(CONFIG_DIR) | + .config_paths.data_dir=strenv(DATA_DIR) | + .config_paths.simulation_path=strenv(CONFIG_DIR)+"/simulation.yaml" | + .config_paths.hub_dir=strenv(HUB_DIR) | + .config_paths.index_path=strenv(HUB_DIR)+"/.index.json" | + .config_paths.notification_dir=strenv(CONFIG_DIR)+"/notifications" | + .config_paths.plugin_dir=strenv(PLUGIN_DIR) | + .crowdsec_service.acquisition_path=strenv(CONFIG_DIR)+"/acquis.yaml" | + .crowdsec_service.acquisition_dir=strenv(CONFIG_DIR)+"/acquis.d" | + .db_config.db_path=strenv(DATA_DIR)+"/crowdsec.db" | + .db_config.use_wal=true | + .api.client.credentials_path=strenv(CONFIG_DIR)+"/local_api_credentials.yaml" | + .api.server.profiles_path=strenv(CONFIG_DIR)+"/profiles.yaml" | + .api.server.console_path=strenv(CONFIG_DIR)+"/console.yaml" | + .api.server.online_client.credentials_path=strenv(CONFIG_DIR)+"/online_api_credentials.yaml" + ' ../config/config.yaml >"${CONFIG_DIR}/config.yaml" +} + + +make_init_data() { + ./bin/assert-crowdsec-not-running || die "Cannot create fixture data." + + remove_init_data + mkdir -p "${DATA_DIR}" + mkdir -p "${CONFIG_DIR}/notifications" + mkdir -p "${CONFIG_DIR}/hub" + mkdir -p "${CONFIG_DIR}/patterns" + cp -a "../config/patterns" "${CONFIG_DIR}/" + config_generate + # XXX errors from instance-db should be reported... + ./instance-db config-yaml + ./instance-db setup + + "${CSCLI}" machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --auto + "${CSCLI}" hub update + "${CSCLI}" collections install crowdsecurity/linux + + mkdir -p "${LOCAL_INIT_DIR}" + + ./instance-db dump "${LOCAL_INIT_DIR}/database" + + echo "${DB_BACKEND}" > "${LOCAL_INIT_DIR}/.backend" + + # disable CAPI by default + yq e 'del(.api.server.online_client)' -i "${CONFIG_DIR}/config.yaml" + + "${TAR}" -C "${LOCAL_DIR}" --create \ + --exclude "${REL_DATA_DIR}"/crowdsec.db \ + --file "${LOCAL_INIT_DIR}/init-config-data.tar" "${REL_CONFIG_DIR}" "${REL_DATA_DIR}" + + remove_init_data +} + +load_init_data() { + ./bin/assert-crowdsec-not-running || die "Cannot load fixture data." + + if [[ ! -f "${LOCAL_INIT_DIR}/init-config-data.tar" ]]; then + die "Initial data not found; did you run '${script_name} make' ?" 
+ fi + + dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" + if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then + die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'" + fi + + remove_init_data + + "${TAR}" -C "${LOCAL_DIR}" --extract --file "${LOCAL_INIT_DIR}/init-config-data.tar" + + ./instance-db restore "${LOCAL_INIT_DIR}/database" +} + + +# --------------------------- + +[[ $# -lt 1 ]] && about + +case "$1" in + make) + make_init_data + ;; + load) + load_init_data + ;; + clean) + remove_init_data + ;; + *) + about + ;; +esac; + diff --git a/tests/lib/db/instance-mysql b/tests/lib/db/instance-mysql new file mode 100755 index 0000000..6b40c84 --- /dev/null +++ b/tests/lib/db/instance-mysql @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +set -eu +script_name=$0 +DB_BACKEND=$(echo "${script_name}" | cut -d- -f2) +export DB_BACKEND + +die() { + echo >&2 "$@" + exit 1 +} + +MYSQL_HOST=${MYSQL_HOST:-127.0.0.1} +MYSQL_PORT=${MYSQL_PORT:-3306} +MYSQL_PASSWORD=${MYSQL_PASSWORD:-password} +MYSQL_USER=${MYSQL_USER:-root} + +about() { + die "usage: ${script_name} [ config_yaml | setup | dump | restore ]" +} + +check_requirements() { + if ! command -v mysql >/dev/null; then + die "missing required program 'mysql' as a mysql client (package mariadb-client-core-10.6 on debian like system)" + fi +} + +silence_password_warning() { + ( ( ( "$@" >&9 ) 2>&1 \ + | grep -F -v "[Warning] Using a password on the command line interface can be insecure." ) >&2 ) 9>&1 || [[ $? == 1 ]] +} + +exec_sql() { + cmd="${1?Missing required sql command}" + + silence_password_warning \ + mysql \ + "--host=${MYSQL_HOST}" \ + "--user=${MYSQL_USER}" \ + "--port=${MYSQL_PORT}" \ + "--password=${MYSQL_PASSWORD}" <<< "${cmd}" +} + +setup() { + exec_sql "DROP DATABASE IF EXISTS crowdsec_test;" + exec_sql "CREATE DATABASE crowdsec_test;" + exec_sql "DROP USER IF EXISTS crowdsec_test;" + exec_sql "CREATE USER 'crowdsec_test' IDENTIFIED BY 'crowdsec_test';" + exec_sql "GRANT ALL PRIVILEGES ON crowdsec_test.* TO 'crowdsec_test';" +} + +dump() { + backup_file="${1?Missing file to backup database to}" + + args=(mysqldump) + if mysqldump --column-statistics 2>&1 | grep -q -v 'unknown option'; then + args+=("--column-statistics=0") + fi + args+=("--host=${MYSQL_HOST}" "--port=${MYSQL_PORT}" "--user=${MYSQL_USER}" "--password=${MYSQL_PASSWORD}" --databases crowdsec_test) + + silence_password_warning "${args[@]}" > "${backup_file}" +} + +restore() { + backup_file="${1?missing file to restore database from}" + [[ -f "${backup_file}" ]] || die "Backup file ${backup_file} doesn't exist" + + silence_password_warning \ + mysql \ + "--host=${MYSQL_HOST}" \ + "--user=${MYSQL_USER}" \ + "--port=${MYSQL_PORT}" \ + "--password=${MYSQL_PASSWORD}" < "${backup_file}" + + exec_sql "DROP USER IF EXISTS 'crowdsec_test';" + exec_sql "CREATE USER 'crowdsec_test' IDENTIFIED BY 'crowdsec_test';" + exec_sql "GRANT ALL PRIVILEGES ON crowdsec_test.* TO 'crowdsec_test';" +} + +config_yaml() { + MYSQL_PORT=${MYSQL_PORT} MYSQL_HOST=${MYSQL_HOST} yq e ' + .db_config.type=strenv(DB_BACKEND)| + .db_config.user="crowdsec_test" | + .db_config.password="crowdsec_test" | + .db_config.db_name="crowdsec_test" | + .db_config.host=strenv(MYSQL_HOST) | + .db_config.port=env(MYSQL_PORT) | + del(.db_config.db_path) + ' -i "${CONFIG_YAML}" +} + +[[ $# -lt 1 ]] && about + +check_requirements + +case "$1" in + setup) + setup + ;; + config-yaml) + config_yaml + ;; + dump) + shift + dump "$@" + ;; + restore) + shift + restore "$@" + ;; + 
exec_sql)
+        shift
+        #
+        # This command is meant to run a query against the crowdsec database.
+        # The exec_sql() function is more generic and is also used for database setup and backups.
+        #
+        # For this reason, we select the database here.
+        #
+        exec_sql "use crowdsec_test; $*"
+        ;;
+    *)
+        about
+        ;;
+esac;
diff --git a/tests/lib/db/instance-pgx b/tests/lib/db/instance-pgx
new file mode 120000
index 0000000..4ccd544
--- /dev/null
+++ b/tests/lib/db/instance-pgx
@@ -0,0 +1 @@
+instance-postgres
\ No newline at end of file
diff --git a/tests/lib/db/instance-postgres b/tests/lib/db/instance-postgres
new file mode 100755
index 0000000..d1ebf81
--- /dev/null
+++ b/tests/lib/db/instance-postgres
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+
+set -eu
+script_name=$0
+DB_BACKEND=$(echo "${script_name}" | cut -d- -f2)
+export DB_BACKEND
+
+die() {
+    echo >&2 "$@"
+    exit 1
+}
+
+PGHOST=${PGHOST:-127.0.0.1}
+PGPORT=${PGPORT:-5432}
+PGPASSWORD=${PGPASSWORD:-postgres}
+PGUSER=${PGUSER:-postgres}
+export PGHOST
+export PGPORT
+export PGPASSWORD
+export PGUSER
+
+about() {
+    die "usage: ${script_name} [ config_yaml | setup | dump | restore ]"
+}
+
+check_requirements() {
+    if ! command -v psql >/dev/null; then
+        die "missing required program 'psql' as a postgres client (package postgresql-client on debian like system)"
+    fi
+    if ! command -v pg_dump >/dev/null; then
+        die "missing required program 'pg_dump' (package postgresql-client on debian like system)"
+    fi
+    if ! command -v pg_restore >/dev/null; then
+        die "missing required program 'pg_restore' (package postgresql-client on debian like system)"
+    fi
+}
+
+exec_sql() {
+    cmd="${1?Missing required sql command}"
+    psql <<< "${cmd}"
+}
+
+setup() {
+    exec_sql "DROP DATABASE IF EXISTS crowdsec_test;"
+    exec_sql "CREATE DATABASE crowdsec_test;"
+    exec_sql "DROP USER IF EXISTS crowdsec_test;"
+    exec_sql "CREATE USER crowdsec_test WITH ENCRYPTED PASSWORD 'crowdsec_test';"
+    exec_sql "GRANT ALL PRIVILEGES ON DATABASE crowdsec_test TO crowdsec_test;"
+}
+
+dump() {
+    backup_file="${1?Missing file to backup database to}"
+    pg_dump -Ft --dbname crowdsec_test --clean --create --file "${backup_file}"
+}
+
+restore() {
+    backup_file="${1?missing file to restore database from}"
+    [[ -f "${backup_file}" ]] || die "Backup file ${backup_file} doesn't exist"
+    pg_restore --dbname crowdsec_test --clean "${backup_file}"
+}
+
+config_yaml() {
+    yq e '
+        .db_config.type=strenv(DB_BACKEND)|
+        .db_config.user="crowdsec_test" |
+        .db_config.password="crowdsec_test" |
+        .db_config.db_name="crowdsec_test" |
+        .db_config.host=strenv(PGHOST) |
+        .db_config.port=env(PGPORT) |
+        .db_config.sslmode="disable" |
+        del(.db_config.db_path)
+    ' -i "${CONFIG_YAML}"
+}
+
+[[ $# -lt 1 ]] && about
+
+check_requirements
+
+case "$1" in
+    setup)
+        setup
+        ;;
+    config-yaml)
+        config_yaml
+        ;;
+    dump)
+        shift
+        dump "$@"
+        ;;
+    restore)
+        shift
+        restore "$@"
+        ;;
+    exec_sql)
+        shift
+        exec_sql "$@"
+        ;;
+    *)
+        about
+        ;;
+esac;
diff --git a/tests/lib/db/instance-sqlite b/tests/lib/db/instance-sqlite
new file mode 100755
index 0000000..8c7d58f
--- /dev/null
+++ b/tests/lib/db/instance-sqlite
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+set -eu
+script_name=$0
+DB_BACKEND=$(echo "${script_name}" | cut -d- -f2)
+export DB_BACKEND
+
+die() {
+    echo >&2 "$@"
+    exit 1
+}
+
+about() {
+    die "usage: ${script_name} [ config-yaml | setup | dump | restore ]"
+}
+
+#shellcheck disable=SC1007
+THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
+cd "${THIS_DIR}"/../../
+#shellcheck disable=SC1091
+. 
./.environment.sh + +exec_sql() { + sqlite3 "${DB_FILE}" "$@" +} + +setup() { + : +} + +dump() { + backup_file="${1?Missing file to backup database to}" + # dirty fast cp. nothing should be accessing it right now, anyway. + [[ -f "${DB_FILE}" ]] || die "missing file ${DB_FILE}" + cp "${DB_FILE}" "${backup_file}" +} + +restore() { + backup_file="${1?missing file to restore database from}" + [[ -f "${backup_file}" ]] || die "Backup file ${backup_file} doesn't exist" + cp "${backup_file}" "${DB_FILE}" +} + +# you have not removed set -u above, have you? + +[[ -z "${CONFIG_YAML-}" ]] && die "\$CONFIG_YAML must be defined." + +# --------------------------- +# In most cases this is called with setup argument, and it shouldn't fail for missing config file. +if [[ -f "${CONFIG_YAML}" ]]; then + DATA_DIR=$(yq e '.config_paths.data_dir' "${CONFIG_YAML}") + DB_FILE="${DATA_DIR}/crowdsec.db" + export DB_FILE +fi + +config_yaml() { + yq e ' + .db_config.type=strenv(DB_BACKEND) | + .db_config.db_path=strenv(DB_FILE) | + .db_config.use_wal=true + ' -i "${CONFIG_YAML}" +} + +[[ $# -lt 1 ]] && about + +case "$1" in + config-yaml) + config_yaml + ;; + setup) + ;; + dump) + shift + dump "$@" + ;; + restore) + shift + restore "$@" + ;; + exec_sql) + shift + exec_sql "$@" + ;; + *) + about + ;; +esac; diff --git a/tests/lib/init/crowdsec-daemon b/tests/lib/init/crowdsec-daemon new file mode 100755 index 0000000..a232f34 --- /dev/null +++ b/tests/lib/init/crowdsec-daemon @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +set -eu +script_name=$0 + +die() { + echo >&2 "$@" + exit 1 +} + +about() { + die "usage: ${script_name} [ start | stop ]" +} + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}"/../../ +#shellcheck disable=SC1091 +. ./.environment.sh + +# you have not removed set -u above, have you? + +[[ -z "${CROWDSEC-}" ]] && die "\$CROWDSEC must be defined." +[[ -z "${LOG_DIR-}" ]] && die "\$LOG_DIR must be defined." +[[ -z "${PID_DIR-}" ]] && die "\$PID_DIR must be defined." + +if [[ ! -f "${CROWDSEC}" ]]; then + die "${CROWDSEC} is missing. Please build (with 'make bats-build') or install it." +fi + +DAEMON_PID=${PID_DIR}/crowdsec.pid + +start() { + daemonize \ + -p "${DAEMON_PID}" \ + -e "${LOG_DIR}/crowdsec.err" \ + -o "${LOG_DIR}/crowdsec.out" \ + "${CROWDSEC}" + ./bin/wait-for-port 6060 +} + +start_pid() { + start + cat "$DAEMON_PID" +} + +stop() { + if [[ -f "${DAEMON_PID}" ]]; then + # terminate quickly with extreme prejudice, all the application data will be + # thrown away anyway. also terminate the child processes (notification plugin). + PGID="$(ps -o pgid= -p "$(cat "${DAEMON_PID}")" | tr -d ' ')" + # ps above should work on linux, freebsd, busybox.. + if [[ -n "${PGID}" ]]; then + kill -- "-${PGID}" + fi + + rm -f -- "${DAEMON_PID}" + fi +} + + +# --------------------------- + +[[ $# -lt 1 ]] && about + +case "$1" in + start) + start + ;; + start-pid) + start_pid + ;; + stop) + stop + ;; + *) + about + ;; +esac; + diff --git a/tests/lib/init/crowdsec-systemd b/tests/lib/init/crowdsec-systemd new file mode 100755 index 0000000..fabc0ff --- /dev/null +++ b/tests/lib/init/crowdsec-systemd @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -eu +script_name=$0 + +die() { + echo >&2 "$@" + exit 1 +} + +about() { + die "usage: ${script_name} [ start | stop ]" +} + +#shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +cd "${THIS_DIR}"/../../ +#shellcheck disable=SC1091 +. 
./.environment.sh + +# you have not removed set -u above, have you? + +[[ -z "${CROWDSEC-}" ]] && die "\$CROWDSEC must be defined." +[[ -z "${CSCLI-}" ]] && die "\$CSCLI must be defined." +[[ -z "${LOG_DIR-}" ]] && die "\$LOG_DIR must be defined." +[[ -z "${PID_DIR-}" ]] && die "\$PID_DIR must be defined." + + +if [[ ! -f "${CROWDSEC}" ]]; then + die "${CROWDSEC} is missing. Please build (with 'make bats-build') or install it." +fi + +start() { + systemctl start crowdsec + ./bin/wait-for-port 6060 +} + +start_pid() { + start + pidof /usr/bin/crowdsec +} + +stop() { + systemctl stop crowdsec # systemd doesn't throw error when stopping already stopped stuff + while pidof /usr/bin/crowdsec ; do sleep 0.1; done +} + + +# --------------------------- + +[[ $# -lt 1 ]] && about + +case "$1" in + start) + start + ;; + start-pid) + start_pid + ;; + stop) + stop + ;; + *) + about + ;; +esac; + diff --git a/tests/lib/setup.sh b/tests/lib/setup.sh new file mode 100755 index 0000000..03d207d --- /dev/null +++ b/tests/lib/setup.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# these plugins are always available + +load "../lib/bats-support/load.bash" +load "../lib/bats-assert/load.bash" +#load "../lib/bats-file/load.bash" + +# mark the start of each test in the logs, beware crowdsec might be running +# echo "time=\"$(date +"%d-%m-%Y %H:%M:%S")\" level=info msg=\"TEST: ${BATS_TEST_DESCRIPTION}\"" >> /var/log/crowdsec.log + diff --git a/tests/lib/setup_file.sh b/tests/lib/setup_file.sh new file mode 100755 index 0000000..7e9401b --- /dev/null +++ b/tests/lib/setup_file.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env bash + +# this should have effect globally, for all tests +# https://github.com/bats-core/bats-core/blob/master/docs/source/warnings/BW02.rst +bats_require_minimum_version 1.5.0 + +debug() { + echo 'exec 1<&-; exec 2<&-; exec 1>&3; exec 2>&1' +} +export -f debug + +# redirects stdout and stderr to &3 otherwise the errors in setup, teardown would +# go unreported. +# BUT - don't do this in test functions. Everything written to stdout and +# stderr after this line will go to the terminal, but in the tests, these +# are supposed to be collected and shown only in case of test failure +# (see options --print-output-on-failure and --show-output-of-passing-tests) +eval "$(debug)" + +# Allow tests to use relative paths for helper scripts. 
+# shellcheck disable=SC2164 +cd "${TEST_DIR}" + +# complain if there's a crowdsec running system-wide or leftover from a previous test +./bin/assert-crowdsec-not-running + +# we can prepend the filename to the test descriptions (useful to feed a TAP consumer) +if [[ "${PREFIX_TEST_NAMES_WITH_FILE:-false}" == "true" ]]; then + BATS_TEST_NAME_PREFIX="$(basename "${BATS_TEST_FILENAME}" .bats): " + export BATS_TEST_NAME_PREFIX +fi + +# before bats 1.7, we did that by hand +FILE= +export FILE + +# the variables exported here can be seen in other setup/teardown/test functions +# MYVAR=something +# export MYVAR + +# functions too +cscli() { + "${CSCLI}" "$@" +} +export -f cscli + +config_get() { + local cfg="${CONFIG_YAML}" + if [[ $# -ge 2 ]]; then + cfg="$1" + shift + fi + + yq e "$1" "${cfg}" +} +export -f config_get + +config_set() { + local cfg="${CONFIG_YAML}" + if [[ $# -ge 2 ]]; then + cfg="$1" + shift + fi + + yq e "$1" -i "${cfg}" +} +export -f config_set + +config_disable_agent() { + config_set 'del(.crowdsec_service)' +} +export -f config_disable_agent + +config_disable_lapi() { + config_set 'del(.api.server)' +} +export -f config_disable_lapi + +config_disable_capi() { + config_set 'del(.api.server.online_client)' +} +export -f config_disable_capi + +config_enable_capi() { + online_api_credentials="$(dirname "${CONFIG_YAML}")/online_api_credentials.yaml" \ + config_set '.api.server.online_client.credentials_path=strenv(online_api_credentials)' +} +export -f config_enable_capi + +# We use these functions like this: +# somecommand <(stderr) +# to provide a standard input to "somecommand". +# The alternatives echo "$stderr" or <<<"$stderr" +# ("here string" in bash jargon) +# are worse because they add a newline, +# even if the variable is empty. + +# shellcheck disable=SC2154 +stderr() { + printf '%s' "${stderr}" +} +export -f stderr + +# shellcheck disable=SC2154 +output() { + printf '%s' "${output}" +} +export -f output + +is_db_postgres() { + [[ "${DB_BACKEND}" =~ ^postgres|pgx$ ]] +} +export -f is_db_postgres + +is_db_mysql() { + [[ "${DB_BACKEND}" == "mysql" ]] +} +export -f is_db_mysql + +is_db_sqlite() { + [[ "${DB_BACKEND}" == "sqlite" ]] +} +export -f is_db_sqlite + +# Compare ignoring the key order, and allow "expected" without quoted identifiers. +# Preserve the output variable in case the following commands require it. +assert_json() { + local oldout="${output}" + # validate actual, sort + run -0 jq -Sen "${output}" + local actual="${output}" + + # handle stdin, quote identifiers, sort + local expected="$1" + if [[ "${expected}" == "-" ]]; then + expected="$(cat)" + fi + run -0 jq -Sn "${expected}" + expected="${output}" + + #shellcheck disable=SC2016 + run jq -ne --argjson a "${actual}" --argjson b "${expected}" '$a == $b' + #shellcheck disable=SC2154 + if [[ "${status}" -ne 0 ]]; then + echo "expect: $(jq -c <<<"${expected}")" + echo "actual: $(jq -c <<<"${actual}")" + diff <(echo "${actual}") <(echo "${expected}") + fail "json does not match" + fi + output="${oldout}" +} +export -f assert_json + +# Check if there's something on stdin by consuming it. Only use this as a way +# to check if something was passed by mistake, since if you read it, it will be +# incomplete. +is_stdin_empty() { + if read -r -t 0.1; then + return 1 + fi + return 0 +} +export -f is_stdin_empty + +assert_stderr() { + # it is never useful to call this without arguments + if [[ "$#" -eq 0 ]]; then + # maybe the caller forgot to use '-' with an heredoc + if ! 
is_stdin_empty; then + fail "${FUNCNAME[0]}: called with stdin and no arguments (heredoc?)" + fi + fail "${FUNCNAME[0]}: called with no arguments" + fi + + local oldout="${output}" + run -0 echo "${stderr}" + assert_output "$@" + output="${oldout}" +} +export -f assert_stderr + +# like refute_output, but for stderr +refute_stderr() { + # calling this without arguments is ok, as long as stdin in empty + if ! is_stdin_empty; then + fail "${FUNCNAME[0]}: called with stdin (heredoc?)" + fi + + local oldout="${output}" + run -0 echo "${stderr}" + refute_output "$@" + output="${oldout}" +} +export -f refute_stderr + +# like assert_output, but for stderr +assert_stderr_line() { + if [[ "$#" -eq 0 ]]; then + fail "${FUNCNAME[0]}: called with no arguments" + fi + + local oldout="${output}" + run -0 echo "${stderr}" + assert_line "$@" + output="${oldout}" +} +export -f assert_stderr_line + +# remove color and style sequences from stdin +plaintext() { + sed -E 's/\x1B\[[0-9;]*[JKmsu]//g' +} +export -f plaintext diff --git a/tests/lib/teardown_file.sh b/tests/lib/teardown_file.sh new file mode 100755 index 0000000..918bdec --- /dev/null +++ b/tests/lib/teardown_file.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# any stdout, stderr from now on will go to &3 +eval "$(debug)" + +# ensure we don't leave crowdsec running if tests are broken or interrupted +./instance-crowdsec stop + diff --git a/tests/localstack/docker-compose.yml b/tests/localstack/docker-compose.yml new file mode 100644 index 0000000..ca128e4 --- /dev/null +++ b/tests/localstack/docker-compose.yml @@ -0,0 +1,83 @@ +version: "3.8" + +services: + localstack: + container_name: localstack_main + image: localstack/localstack + network_mode: bridge + ports: + - "127.0.0.1:53:53" # only required for Pro (DNS) + - "127.0.0.1:53:53/udp" # only required for Pro (DNS) + - "127.0.0.1:443:443" # only required for Pro (LocalStack HTTPS Edge Proxy) + - "127.0.0.1:4510-4559:4510-4559" # external service port range + - "127.0.0.1:4566:4566" # LocalStack Edge Proxy + environment: + AWS_HOST: localstack + SERVICES: "cloudwatch,logs,kinesis" + DEBUG: "" + DATA_DIR: "" + LAMBDA_EXECUTOR: "" + KINESYS_ERROR_PROBABILITY: "" + DOCKER_HOST: "unix://var/run/docker.sock" + HOST_TMP_FOLDER: "/tmp" + KINESIS_INITIALIZE_STREAMS: "stream-1-shard:1,stream-2-shards:2" + HOSTNAME_EXTERNAL: "localstack" + AWS_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE" + AWS_SECRET_ACCESS_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + AWS_REGION: "us-east-1" + AWS_ENDPOINT_FORCE: "http://localhost:4566" + + volumes: + - "${TMPDIR:-/tmp}/localstack:/tmp/localstack" + - "/var/run/docker.sock:/var/run/docker.sock" + + zoo1: + image: confluentinc/cp-zookeeper:7.1.1 + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_SERVER_ID: 1 + ZOOKEEPER_SERVERS: zoo1:2888:3888 + + healthcheck: + test: jps -l | grep zookeeper + interval: 10s + retries: 5 + timeout: 5s + + kafka1: + image: crowdsecurity/kafka-ssl + ports: + - "9093:9093" + - "9092:9092" + - "9999:9999" + environment: + KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://127.0.0.1:19092,LISTENER_DOCKER_EXTERNAL://127.0.0.1:9092,LISTENER_DOCKER_EXTERNAL_SSL://127.0.0.1:9093 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL_SSL:SSL + KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL + KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181" + KAFKA_BROKER_ID: 1 + KAFKA_LOG4J_LOGGERS: 
"kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_JMX_PORT: 9999 + KAFKA_JMX_HOSTNAME: "127.0.0.1" + KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" + KAFKA_SSL_KEYSTORE_FILENAME: kafka.kafka1.keystore.jks + KAFKA_SSL_KEYSTORE_CREDENTIALS: kafka1_keystore_creds + KAFKA_SSL_KEY_CREDENTIALS: kafka1_sslkey_creds + KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.kafka1.truststore.jks + KAFKA_SSL_TRUSTSTORE_CREDENTIALS: kafka1_truststore_creds + KAFKA_SSL_ENABLED_PROTOCOLS: TLSv1.2 + KAFKA_SSL_PROTOCOL: TLSv1.2 + KAFKA_SSL_CLIENT_AUTH: none + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" + + healthcheck: + test: kafka-broker-api-versions --version + interval: 10s + retries: 5 + timeout: 10s diff --git a/tests/run-tests b/tests/run-tests new file mode 100755 index 0000000..29052fb --- /dev/null +++ b/tests/run-tests @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + +# shellcheck disable=SC1007 +TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck source=./.environment.sh +. "${TEST_DIR}/.environment.sh" + +"${TEST_DIR}/bin/check-requirements" + +echo "Running tests..." +echo "DB_BACKEND: ${DB_BACKEND}" +if [[ -z "${TEST_COVERAGE}" ]]; then + echo "Coverage report: no" +else + echo "Coverage report: yes" + rm -f "${LOCAL_DIR}/var/lib/coverage/*" + mkdir -p "${LOCAL_DIR}/var/lib/coverage" +fi + +dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" +if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then + die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" +fi + +if [[ $# -ge 1 ]]; then + echo "test files: $*" + "${TEST_DIR}/lib/bats-core/bin/bats" \ + --jobs 1 \ + --timing \ + --print-output-on-failure \ + "$@" +else + echo "test files: ${TEST_DIR}/bats ${TEST_DIR}/dyn-bats" + "${TEST_DIR}/lib/bats-core/bin/bats" \ + --jobs 1 \ + --timing \ + --print-output-on-failure \ + "${TEST_DIR}/bats" "${TEST_DIR}/dyn-bats" +fi + +if [[ -n "${TEST_COVERAGE}" ]]; then + # empty files just to avoid merge errors + touch "${LOCAL_DIR}"/var/lib/coverage/crowdsec- + touch "${LOCAL_DIR}"/var/lib/coverage/cscli- + gocovmerge "${LOCAL_DIR}"/var/lib/coverage/crowdsec-* > "${LOCAL_DIR}/var/lib/coverage/coverage-crowdsec.out" + gocovmerge "${LOCAL_DIR}"/var/lib/coverage/cscli-* > "${LOCAL_DIR}/var/lib/coverage/coverage-cscli.out" +fi diff --git a/windows/Chocolatey/crowdsec/ReadMe.md b/windows/Chocolatey/crowdsec/ReadMe.md new file mode 100644 index 0000000..431c518 --- /dev/null +++ b/windows/Chocolatey/crowdsec/ReadMe.md @@ -0,0 +1,133 @@ +## Summary +How do I create packages? See https://docs.chocolatey.org/en-us/create/create-packages + +If you are submitting packages to the community feed (https://community.chocolatey.org) +always try to ensure you have read, understood and adhere to the create +packages wiki link above. + +## Automatic Packaging Updates? +Consider making this package an automatic package, for the best +maintainability over time. Read up at https://docs.chocolatey.org/en-us/create/automatic-packages + +## Shim Generation +Any executables you include in the package or download (but don't call +install against using the built-in functions) will be automatically shimmed. + +This means those executables will automatically be included on the path. 
+Shim generation runs whether the package is self-contained or uses automation
+scripts.
+
+By default, these are considered console applications.
+
+If the application is a GUI, you should create an empty file next to the exe
+named 'name.exe.gui' e.g. 'bob.exe' would need a file named 'bob.exe.gui'.
+See https://docs.chocolatey.org/en-us/create/create-packages#how-do-i-set-up-shims-for-applications-that-have-a-gui
+
+If you want to ignore the executable, create an empty file next to the exe
+named 'name.exe.ignore' e.g. 'bob.exe' would need a file named
+'bob.exe.ignore'.
+See https://docs.chocolatey.org/en-us/create/create-packages#how-do-i-exclude-executables-from-getting-shims
+
+## Self-Contained?
+If you have a self-contained package, you can remove the automation scripts
+entirely and just include the executables, they will automatically get shimmed,
+which puts them on the path. Ensure you have the legal right to distribute
+the application though. See https://docs.chocolatey.org/en-us/information/legal.
+
+You should read up on the Shim Generation section to familiarize yourself
+on what to do with GUI applications and/or ignoring shims.
+
+## Automation Scripts
+You have a powerful use of Chocolatey, as you are using PowerShell. So you
+can do just about anything you need. Choco has some very handy built-in
+functions that you can use, these are sometimes called the helpers.
+
+### Built-In Functions
+https://docs.chocolatey.org/en-us/create/functions
+
+A note about a couple:
+* Get-BinRoot - this is a horribly named function that doesn't do what new folks think it does. It gets you the 'tools' root, which by default is set to 'c:\tools', not the chocolateyInstall bin folder - see https://docs.chocolatey.org/en-us/create/functions/get-toolslocation
+* Install-BinFile - used for non-exe files - executables are automatically shimmed... - see https://docs.chocolatey.org/en-us/create/functions/install-binfile
+* Uninstall-BinFile - used for non-exe files - executables are automatically shimmed - see https://docs.chocolatey.org/en-us/create/functions/uninstall-binfile
+
+### Getting package specific information
+Use the package parameters pattern - see https://docs.chocolatey.org/en-us/guides/create/parse-packageparameters-argument
+
+### Need to mount an ISO?
+https://docs.chocolatey.org/en-us/guides/create/mount-an-iso-in-chocolatey-package
+
+### Environment Variables
+Chocolatey makes a number of environment variables available (You can access any of these with $env:TheVariableNameBelow):
+
+ * TEMP/TMP - Overridden to the CacheLocation, but may be the same as the original TEMP folder
+ * ChocolateyInstall - Top level folder where Chocolatey is installed
+ * ChocolateyPackageName - The name of the package, equivalent to the `<id />` field in the nuspec (0.9.9+)
+ * ChocolateyPackageTitle - The title of the package, equivalent to the `<title />` field in the nuspec (0.10.1+)
+ * ChocolateyPackageVersion - The version of the package, equivalent to the `<version />` field in the nuspec (0.9.9+)
+ * ChocolateyPackageFolder - The top level location of the package folder - the folder where Chocolatey has downloaded and extracted the NuGet package, typically `C:\ProgramData\chocolatey\lib\packageName`.
+
+#### Advanced Environment Variables
+The following are more advanced settings:
+
+ * ChocolateyPackageParameters - Parameters to use with packaging, not the same as install arguments (which are passed directly to the native installer). Based on `--package-parameters`.
(0.9.8.22+) + * CHOCOLATEY_VERSION - The version of Choco you normally see. Use if you are 'lighting' things up based on choco version. (0.9.9+) - Otherwise take a dependency on the specific version you need. + * ChocolateyForceX86 = If available and set to 'true', then user has requested 32bit version. (0.9.9+) - Automatically handled in built in Choco functions. + * OS_PLATFORM - Like Windows, OSX, Linux. (0.9.9+) + * OS_VERSION - The version of OS, like 6.1 something something for Windows. (0.9.9+) + * OS_NAME - The reported name of the OS. (0.9.9+) + * USER_NAME = The user name (0.10.6+) + * USER_DOMAIN = The user domain name (could also be local computer name) (0.10.6+) + * IS_PROCESSELEVATED = Is the process elevated? (0.9.9+) + * IS_SYSTEM = Is the user the system account? (0.10.6+) + * IS_REMOTEDESKTOP = Is the user in a terminal services session? (0.10.6+) + * ChocolateyToolsLocation - formerly 'ChocolateyBinRoot' ('ChocolateyBinRoot' will be removed with Chocolatey v2.0.0), this is where tools being installed outside of Chocolatey packaging will go. (0.9.10+) + +#### Set By Options and Configuration +Some environment variables are set based on options that are passed, configuration and/or features that are turned on: + + * ChocolateyEnvironmentDebug - Was `--debug` passed? If using the built-in PowerShell host, this is always true (but only logs debug messages to console if `--debug` was passed) (0.9.10+) + * ChocolateyEnvironmentVerbose - Was `--verbose` passed? If using the built-in PowerShell host, this is always true (but only logs verbose messages to console if `--verbose` was passed). (0.9.10+) + * ChocolateyExitOnRebootDetected - Are we exiting on a detected reboot? Set by ` --exit-when-reboot-detected` or the feature `exitOnRebootDetected` (0.11.0+) + * ChocolateyForce - Was `--force` passed? (0.9.10+) + * ChocolateyForceX86 - Was `-x86` passed? (CHECK) + * ChocolateyRequestTimeout - How long before a web request will time out. Set by config `webRequestTimeoutSeconds` (CHECK) + * ChocolateyResponseTimeout - How long to wait for a download to complete? Set by config `commandExecutionTimeoutSeconds` (CHECK) + * ChocolateyPowerShellHost - Are we using the built-in PowerShell host? Set by `--use-system-powershell` or the feature `powershellHost` (0.9.10+) + +#### Business Edition Variables + + * ChocolateyInstallArgumentsSensitive - Encrypted arguments passed from command line `--install-arguments-sensitive` that are not logged anywhere. (0.10.1+ and licensed editions 1.6.0+) + * ChocolateyPackageParametersSensitive - Package parameters passed from command line `--package-parameters-senstivite` that are not logged anywhere. (0.10.1+ and licensed editions 1.6.0+) + * ChocolateyLicensedVersion - What version is the licensed edition on? + * ChocolateyLicenseType - What edition / type of the licensed edition is installed? + * USER_CONTEXT - The original user context - different when self-service is used (Licensed v1.10.0+) + +#### Experimental Environment Variables +The following are experimental or use not recommended: + + * OS_IS64BIT = This may not return correctly - it may depend on the process the app is running under (0.9.9+) + * CHOCOLATEY_VERSION_PRODUCT = the version of Choco that may match CHOCOLATEY_VERSION but may be different (0.9.9+) - based on git describe + * IS_ADMIN = Is the user an administrator? But doesn't tell you if the process is elevated. (0.9.9+) + * IS_REMOTE = Is the user in a remote session? 
(0.10.6+) + +#### Not Useful Or Anti-Pattern If Used + + * ChocolateyInstallOverride = Not for use in package automation scripts. Based on `--override-arguments` being passed. (0.9.9+) + * ChocolateyInstallArguments = The installer arguments meant for the native installer. You should use chocolateyPackageParameters instead. Based on `--install-arguments` being passed. (0.9.9+) + * ChocolateyIgnoreChecksums - Was `--ignore-checksums` passed or the feature `checksumFiles` turned off? (0.9.9.9+) + * ChocolateyAllowEmptyChecksums - Was `--allow-empty-checksums` passed or the feature `allowEmptyChecksums` turned on? (0.10.0+) + * ChocolateyAllowEmptyChecksumsSecure - Was `--allow-empty-checksums-secure` passed or the feature `allowEmptyChecksumsSecure` turned on? (0.10.0+) + * ChocolateyChecksum32 - Was `--download-checksum` passed? (0.10.0+) + * ChocolateyChecksumType32 - Was `--download-checksum-type` passed? (0.10.0+) + * ChocolateyChecksum64 - Was `--download-checksum-x64` passed? (0.10.0)+ + * ChocolateyChecksumType64 - Was `--download-checksum-type-x64` passed? (0.10.0)+ + * ChocolateyPackageExitCode - The exit code of the script that just ran - usually set by `Set-PowerShellExitCode` (CHECK) + * ChocolateyLastPathUpdate - Set by Chocolatey as part of install, but not used for anything in particular in packaging. + * ChocolateyProxyLocation - The explicit proxy location as set in the configuration `proxy` (0.9.9.9+) + * ChocolateyDownloadCache - Use available download cache? Set by `--skip-download-cache`, `--use-download-cache`, or feature `downloadCache` (0.9.10+ and licensed editions 1.1.0+) + * ChocolateyProxyBypassList - Explicitly set locations to ignore in configuration `proxyBypassList` (0.10.4+) + * ChocolateyProxyBypassOnLocal - Should the proxy bypass on local connections? Set based on configuration `proxyBypassOnLocal` (0.10.4+) + * http_proxy - Set by original `http_proxy` passthrough, or same as `ChocolateyProxyLocation` if explicitly set. (0.10.4+) + * https_proxy - Set by original `https_proxy` passthrough, or same as `ChocolateyProxyLocation` if explicitly set. (0.10.4+) + * no_proxy- Set by original `no_proxy` passthrough, or same as `ChocolateyProxyBypassList` if explicitly set. (0.10.4+) + diff --git a/windows/Chocolatey/crowdsec/crowdsec.nuspec b/windows/Chocolatey/crowdsec/crowdsec.nuspec new file mode 100644 index 0000000..459438d --- /dev/null +++ b/windows/Chocolatey/crowdsec/crowdsec.nuspec @@ -0,0 +1,45 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Do not remove this test for UTF-8: if “Ω” doesn’t appear as greek uppercase omega letter enclosed in quotation marks, you should use an editor that supports UTF-8, not this one. 
-->
+<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
+  <metadata>
+    <id>crowdsec</id>
+    <!-- The version is set by the version argument of choco pack, so we just specify a dummy version here -->
+    <version>0.0.1</version>
+    <packageSourceUrl>https://github.com/crowdsecurity/crowdsec</packageSourceUrl>
+    <owners>CrowdSecurity</owners>
+    <!-- ============================== -->
+
+    <!-- == SOFTWARE SPECIFIC SECTION == -->
+    <title>CrowdSec</title>
+    <authors>CrowdSecurity</authors>
+    <projectUrl>https://crowdsec.net/</projectUrl>
+    <copyright>CrowdSec, 2022</copyright>
+    <licenseUrl>https://github.com/crowdsecurity/crowdsec/blob/master/LICENSE</licenseUrl>
+    <requireLicenseAcceptance>true</requireLicenseAcceptance>
+    <projectSourceUrl>https://github.com/crowdsecurity/crowdsec</projectSourceUrl>
+    <docsUrl>https://docs.crowdsec.net</docsUrl>
+    <bugTrackerUrl>https://github.com/crowdsecurity/crowdsec/issues</bugTrackerUrl>
+    <iconUrl>https://cdn.jsdelivr.net/gh/crowdsecurity/crowdsec-docs@c7b90095cca0007652f22c5c6d945c3416df4693/crowdsec-docs/static/img/crowdsec_no_txt.png</iconUrl>
+    <tags>crowdsec crowdsecurity security ips ids</tags>
+    <summary>CrowdSec IDS</summary>
+    <description>
+    CrowdSec is a free, modern and collaborative behavior detection engine, coupled with a global IP reputation network.
+    It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), uses Grok patterns to parse logs and YAML scenario to identify behaviors.
+    CrowdSec is engineered for modern Cloud / Containers / VM based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security.
+
+    ### Package Specific
+    #### Package parameters
+
+    - AgentOnly: If set, the local API will be disabled. You will need to register the agent in LAPI yourself and configure the service to start on boot.
+    </description>
+  </metadata>
+  <files>
+    <file src="tools\**" target="tools" />
+  </files>
+</package>
diff --git a/windows/Chocolatey/crowdsec/tools/LICENSE.txt b/windows/Chocolatey/crowdsec/tools/LICENSE.txt
new file mode 100644
index 0000000..5c65369
--- /dev/null
+++ b/windows/Chocolatey/crowdsec/tools/LICENSE.txt
@@ -0,0 +1,26 @@
+
+From: https://github.com/crowdsecurity/crowdsec/blob/master/LICENSE
+
+LICENSE
+
+MIT License
+
+Copyright (c) 2022 crowdsec
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file diff --git a/windows/Chocolatey/crowdsec/tools/VERIFICATION.txt b/windows/Chocolatey/crowdsec/tools/VERIFICATION.txt new file mode 100644 index 0000000..eb16249 --- /dev/null +++ b/windows/Chocolatey/crowdsec/tools/VERIFICATION.txt @@ -0,0 +1,9 @@ + + +VERIFICATION +Verification is intended to assist the Chocolatey moderators and community +in verifying that this package's contents are trustworthy. + +This package is published by CrowdSecurity itself. The MSI is identical to the one published in the github releases for the project. +You can download the MSI from the latest release or pre-release here: https://github.com/crowdsecurity/crowdsec/releases +The MSI is also digitally signed. \ No newline at end of file diff --git a/windows/Chocolatey/crowdsec/tools/chocolateybeforemodify.ps1 b/windows/Chocolatey/crowdsec/tools/chocolateybeforemodify.ps1 new file mode 100644 index 0000000..bb71f3b --- /dev/null +++ b/windows/Chocolatey/crowdsec/tools/chocolateybeforemodify.ps1 @@ -0,0 +1 @@ +Stop-Service crowdsec \ No newline at end of file diff --git a/windows/Chocolatey/crowdsec/tools/chocolateyinstall.ps1 b/windows/Chocolatey/crowdsec/tools/chocolateyinstall.ps1 new file mode 100644 index 0000000..6c817a0 --- /dev/null +++ b/windows/Chocolatey/crowdsec/tools/chocolateyinstall.ps1 @@ -0,0 +1,29 @@ +$ErrorActionPreference = 'Stop'; +$toolsDir = "$(Split-Path -parent $MyInvocation.MyCommand.Definition)" +$fileLocation = Join-Path $toolsDir 'crowdsec.msi' + +$silentArgs = "/qn /norestart /l*v `"$($env:TEMP)\$($packageName).$($env:chocolateyPackageVersion).MsiInstall.log`"" + + +$pp = Get-PackageParameters + +if ($pp['AgentOnly']) { + $silentArgs += " AGENT_ONLY=1" +} + + +$packageArgs = @{ + packageName = $env:ChocolateyPackageName + unzipLocation = $toolsDir + fileType = 'msi' + file64 = $fileLocation + softwareName = 'Crowdsec' + silentArgs = $silentArgs + validExitCodes= @(0, 3010, 1641) +} + +Install-ChocolateyInstallPackage @packageArgs + +if ($pp['AgentOnly']) { + Write-Host "/AgentOnly was specified. LAPI is disabled, please register your agent manually and configure the service to start on boot." +} \ No newline at end of file diff --git a/windows/Chocolatey/crowdsec/tools/chocolateyuninstall.ps1 b/windows/Chocolatey/crowdsec/tools/chocolateyuninstall.ps1 new file mode 100644 index 0000000..ec6ec3d --- /dev/null +++ b/windows/Chocolatey/crowdsec/tools/chocolateyuninstall.ps1 @@ -0,0 +1,30 @@ +$ErrorActionPreference = 'Stop'; +$packageArgs = @{ + packageName = $env:ChocolateyPackageName + softwareName = 'Crowdsec' + fileType = 'MSI' + silentArgs = "/qn /norestart" + validExitCodes= @(0, 3010, 1605, 1614, 1641) +} + +[array]$key = Get-UninstallRegistryKey -SoftwareName $packageArgs['softwareName'] + +if ($key.Count -eq 1) { + $key | % { + $packageArgs['file'] = "$($_.UninstallString)" + if ($packageArgs['fileType'] -eq 'MSI') { + $packageArgs['silentArgs'] = "$($_.PSChildName) $($packageArgs['silentArgs'])" + $packageArgs['file'] = '' + } else { + } + + Uninstall-ChocolateyPackage @packageArgs + } +} elseif ($key.Count -eq 0) { + Write-Warning "$packageName has already been uninstalled by other means." +} elseif ($key.Count -gt 1) { + Write-Warning "$($key.Count) matches found!" + Write-Warning "To prevent accidental data loss, no programs will be uninstalled." 
+ Write-Warning "Please alert package maintainer the following keys were matched:" + $key | % {Write-Warning "- $($_.DisplayName)"} +} \ No newline at end of file diff --git a/windows/README.md b/windows/README.md new file mode 100644 index 0000000..57ce70d --- /dev/null +++ b/windows/README.md @@ -0,0 +1,51 @@ +# Building Crowdsec for Windows + +We provide scripts for PowerShell Core (>=v7.0). You can install it from [The latest GitHub releases](https://github.com/PowerShell/PowerShell/releases). Download the appropriate .msi file and execute it. + +Now, run PowerShell as Administrator, go in the crowdsec repository (if you +already cloned it) and run: + +```powershell +PS C:\Users\johndoe\src\crowdsec> powershell .\windows\install_dev_windows.ps1 +[...] +``` + +If you don't have git you can download the script and execute it, it will +install git for you. + +Now you should have the right dependencies (go, gcc, git). You can verify with +`choco list --localonly`. This is enough to build from sources, but if you want +to also build the choco or MSI packages, you need more dependencies: + +```powershell +PS C:\Users\johndoe\src\crowdsec> powershell .\windows\install_installer_windows.ps1 +[...] +``` + +You can now use + +* `make` to build cmd\crowdsec\crowdsec.exe and cmd\crowdsec-cli\cscli.exe +* `make test` to run unit tests. Some tests requiring localstack are disabled. Functional tests are also only available on unix systems. + +* `make windows_installer` to build a `crowdsec_x.y.z.msi` installer +* `make chocolatey` to build a package under `.\windows\Chocolatey\crowdsec\crowdsec_x.y.z.nupkg` which you can test using `choco install ` + +After installing CrowdSec, the binaries are in `C:\Program Files\CrowdSec\`: + +```powershell +PS C:\Users\johndoe\src\crowdsec> & 'C:\Program Files\CrowdSec\cscli.exe' metrics +[...] +PS C:\Users\johndoe\src\crowdsec> & 'C:\Program Files\CrowdSec\cscli.exe' parsers install crowdsecurity/syslog-logs +[...] +``` + +To start/stop the service: + +```powershell +PS C:\Users\johndoe\src\crowdsec> net stop crowdsec +The CrowdSec service is stopping.. +The CrowdSec service was stopped successfully. +PS C:\Users\johndoe\src\crowdsec> net start crowdsec +The CrowdSec service is starting. +The CrowdSec service was started successfully. 
+```
diff --git a/windows/README.md b/windows/README.md
new file mode 100644
index 0000000..57ce70d
--- /dev/null
+++ b/windows/README.md
@@ -0,0 +1,51 @@
+# Building Crowdsec for Windows
+
+We provide scripts for PowerShell Core (>=v7.0). You can install it from [the latest GitHub releases](https://github.com/PowerShell/PowerShell/releases); download the appropriate .msi file and execute it.
+
+Now run PowerShell as Administrator, go to the crowdsec repository (if you
+have already cloned it) and run:
+
+```powershell
+PS C:\Users\johndoe\src\crowdsec> powershell .\windows\install_dev_windows.ps1
+[...]
+```
+
+If you don't have git, you can download the script and execute it directly; it
+will install git for you.
+
+You should now have the right dependencies (go, gcc, git). You can verify with
+`choco list --localonly`. This is enough to build from sources, but if you want
+to also build the choco or MSI packages, you need more dependencies:
+
+```powershell
+PS C:\Users\johndoe\src\crowdsec> powershell .\windows\install_installer_windows.ps1
+[...]
+```
+
+You can now use:
+
+* `make` to build cmd\crowdsec\crowdsec.exe and cmd\crowdsec-cli\cscli.exe
+* `make test` to run unit tests. Some tests requiring localstack are disabled. Functional tests are also only available on Unix systems.
+* `make windows_installer` to build a `crowdsec_x.y.z.msi` installer
+* `make chocolatey` to build a package under `.\windows\Chocolatey\crowdsec\crowdsec_x.y.z.nupkg` which you can test using `choco install `
+
+After installing CrowdSec, the binaries are in `C:\Program Files\CrowdSec\`:
+
+```powershell
+PS C:\Users\johndoe\src\crowdsec> & 'C:\Program Files\CrowdSec\cscli.exe' metrics
+[...]
+PS C:\Users\johndoe\src\crowdsec> & 'C:\Program Files\CrowdSec\cscli.exe' parsers install crowdsecurity/syslog-logs
+[...]
+```
+
+To start/stop the service:
+
+```powershell
+PS C:\Users\johndoe\src\crowdsec> net stop crowdsec
+The CrowdSec service is stopping..
+The CrowdSec service was stopped successfully.
+PS C:\Users\johndoe\src\crowdsec> net start crowdsec
+The CrowdSec service is starting.
+The CrowdSec service was started successfully.
+```
diff --git a/windows/install_dev_windows.ps1 b/windows/install_dev_windows.ps1
new file mode 100644
index 0000000..325d8b1
--- /dev/null
+++ b/windows/install_dev_windows.ps1
@@ -0,0 +1,6 @@
+# install chocolatey, then the build dependencies (go, git, mingw/gcc)
+Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+choco install -y golang
+choco install -y git
+choco install -y mingw
+refreshenv
diff --git a/windows/install_installer_windows.ps1 b/windows/install_installer_windows.ps1
new file mode 100644
index 0000000..a2ae1d5
--- /dev/null
+++ b/windows/install_installer_windows.ps1
@@ -0,0 +1,2 @@
+choco install -y wixtoolset
+$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.11\bin"
\ No newline at end of file
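Putting the two setup scripts and the README's make targets together, a full from-source session looks roughly like this; this transcript is illustrative and assumes the scripts above have been run and a `make` binary is on PATH:

```powershell
PS C:\Users\johndoe\src\crowdsec> make                    # builds crowdsec.exe and cscli.exe
PS C:\Users\johndoe\src\crowdsec> make test               # unit tests (localstack-dependent ones skipped)
PS C:\Users\johndoe\src\crowdsec> make windows_installer  # produces crowdsec_x.y.z.msi (needs WiX)
PS C:\Users\johndoe\src\crowdsec> make chocolatey         # produces the .nupkg under .\windows\Chocolatey\crowdsec\
```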
diff --git a/windows/installer/WixUI_HK.wxs b/windows/installer/WixUI_HK.wxs
new file mode 100644
index 0000000..38cccbd
--- /dev/null
+++ b/windows/installer/WixUI_HK.wxs
@@ -0,0 +1,65 @@
[The XML markup of this 65-line WiX UI definition was stripped during text extraction and cannot be reconstructed here. What survives are the publish conditions wiring the dialog sequence ("1", "NOT Installed", "Installed AND PATCH", "NOT WIXUI_DONTVALIDATEPATH", "WIXUI_DONTVALIDATEPATH OR WIXUI_INSTALLDIR_VALID=\"1\"") and the ProgressText strings shown while the installer runs: "Updating Hub content", "Installing Windows collection", "Registering agent to local API", "Registering to Crowdsec central API".]
diff --git a/windows/installer/crowdsec_icon.ico b/windows/installer/crowdsec_icon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..93763e2bd672bb1db1393ea01c84c34d4e1463a2
GIT binary patch
literal 4286
[base85-encoded binary patch data for crowdsec_icon.ico elided]

literal 0
HcmV?d00001

diff --git a/windows/installer/installer_dialog.bmp b/windows/installer/installer_dialog.bmp
new file mode 100644
index 0000000000000000000000000000000000000000..1e32ba7d9cbd8334302cbba45a7622731b6706b5
GIT binary patch
literal 155914
[base85-encoded binary patch data for installer_dialog.bmp elided; truncated in the source text]
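The .wxs sources under windows/installer/ (the UI definition above, plus the product definition noted below) are turned into the MSI by `make windows_installer`. With the WiX v3.11 toolset installed by install_installer_windows.ps1, the underlying steps look roughly like this sketch; the exact flags, output names, and extensions are assumptions, not the Makefile's literal recipe:

```powershell
# Sketch: compile the WiX sources, then link them into an MSI (WiX v3 toolchain).
candle.exe -arch x64 -out build\ .\windows\installer\*.wxs
light.exe -ext WixUIExtension -out crowdsec.msi build\*.wixobj
```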
[The diff header and XML markup of the WiX product definition that followed the bitmap were also lost in extraction. Of its content, only feature and custom-action conditions survive: "AGENT_ONLY", "NOT AGENT_ONLY", "NOT Installed AND NOT REMOVE", "NOT REMOVE", and "NOT Installed AND NOT REMOVE AND NOT AGENT_ONLY AND NOT WIX_UPGRADE_DETECTED"; i.e. the LAPI-side components are skipped for agent-only installs, and the hub/registration custom actions only run on a fresh, non-agent-only install.]
\ No newline at end of file
diff --git a/wizard.sh b/wizard.sh
new file mode 100755
index 0000000..7a314d8
--- /dev/null
+++ b/wizard.sh
@@ -0,0 +1,816 @@
+#!/usr/bin/env bash
+
+set -o pipefail
+#set -x
+
+skip_tmp_acquis() {
+    [[ "${TMP_ACQUIS_FILE_SKIP}" == "skip" ]]
+}
+
+
+RED='\033[0;31m'
+BLUE='\033[0;34m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+ORANGE='\033[0;33m'
+NC='\033[0m'
+
+SILENT="false"
+DOCKER_MODE="false"
+
+CROWDSEC_RUN_DIR="/var/run"
+CROWDSEC_LIB_DIR="/var/lib/crowdsec"
+CROWDSEC_USR_DIR="/usr/local/lib/crowdsec"
+CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data"
+CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db"
+CROWDSEC_PATH="/etc/crowdsec"
+CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}"
+CROWDSEC_LOG_FILE="/var/log/crowdsec.log"
+LAPI_LOG_FILE="/var/log/crowdsec_api.log"
+CROWDSEC_PLUGIN_DIR="${CROWDSEC_USR_DIR}/plugins"
+
+CROWDSEC_BIN="./cmd/crowdsec/crowdsec"
+CSCLI_BIN="./cmd/crowdsec-cli/cscli"
+
+CLIENT_SECRETS="local_api_credentials.yaml"
+LAPI_SECRETS="online_api_credentials.yaml"
+
+CONSOLE_FILE="console.yaml"
+
+BIN_INSTALL_PATH="/usr/local/bin"
+CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec"
+
+if [[ -f "/usr/bin/cscli" ]] ; then
+    CSCLI_BIN_INSTALLED="/usr/bin/cscli"
+else
+    CSCLI_BIN_INSTALLED="${BIN_INSTALL_PATH}/cscli"
+fi
+
+ACQUIS_PATH="${CROWDSEC_CONFIG_PATH}"
+ACQUIS_TARGET="${ACQUIS_PATH}/acquis.yaml"
+
+SYSTEMD_PATH_FILE="/etc/systemd/system/crowdsec.service"
+
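+# Layout recap: the binaries built under ./cmd/ are installed to ${BIN_INSTALL_PATH},
+# configuration and patterns live under ${CROWDSEC_PATH}, hub data under
+# ${CROWDSEC_DATA_DIR}, and the generated systemd unit at ${SYSTEMD_PATH_FILE}.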
+PATTERNS_FOLDER="config/patterns" +PATTERNS_PATH="${CROWDSEC_CONFIG_PATH}/patterns/" + +ACTION="" + +DEBUG_MODE="false" +FORCE_MODE="false" + +SUPPORTED_SERVICES='apache2 +httpd +nginx +sshd +mysql +telnet +smb +' + + +HTTP_PLUGIN_BINARY="./plugins/notifications/http/notification-http" +SLACK_PLUGIN_BINARY="./plugins/notifications/slack/notification-slack" +SPLUNK_PLUGIN_BINARY="./plugins/notifications/splunk/notification-splunk" +EMAIL_PLUGIN_BINARY="./plugins/notifications/email/notification-email" + +HTTP_PLUGIN_CONFIG="./plugins/notifications/http/http.yaml" +SLACK_PLUGIN_CONFIG="./plugins/notifications/slack/slack.yaml" +SPLUNK_PLUGIN_CONFIG="./plugins/notifications/splunk/splunk.yaml" +EMAIL_PLUGIN_CONFIG="./plugins/notifications/email/email.yaml" + +BACKUP_DIR=$(mktemp -d) +rm -rf -- "$BACKUP_DIR" + +log_info() { + msg=$1 + date=$(date +%x:%X) + echo -e "${BLUE}INFO${NC}[${date}] crowdsec_wizard: ${msg}" +} + +log_fatal() { + msg=$1 + date=$(date +%x:%X) + echo -e "${RED}FATA${NC}[${date}] crowdsec_wizard: ${msg}" 1>&2 + exit 1 +} + +log_warn() { + msg=$1 + date=$(date +%x:%X) + echo -e "${ORANGE}WARN${NC}[${date}] crowdsec_wizard: ${msg}" +} + +log_err() { + msg=$1 + date=$(date +%x:%X) + echo -e "${RED}ERR${NC}[${date}] crowdsec_wizard: ${msg}" 1>&2 +} + +log_dbg() { + if [[ ${DEBUG_MODE} == "true" ]]; then + msg=$1 + date=$(date +%x:%X) + echo -e "[${date}][${YELLOW}DBG${NC}] crowdsec_wizard: ${msg}" 1>&2 + fi +} + +detect_services () { + DETECTED_SERVICES=() + HMENU=() + #list systemd services + SYSTEMD_SERVICES=`systemctl --state=enabled list-unit-files '*.service' | cut -d ' ' -f1` + #raw ps + PSAX=`ps ax -o comm=` + for SVC in ${SUPPORTED_SERVICES} ; do + log_dbg "Checking if service '${SVC}' is running (ps+systemd)" + for SRC in "${SYSTEMD_SERVICES}" "${PSAX}" ; do + echo ${SRC} | grep ${SVC} >/dev/null + if [ $? -eq 0 ]; then + #on centos, apache2 is named httpd + if [[ ${SVC} == "httpd" ]] ; then + SVC="apache2"; + fi + DETECTED_SERVICES+=(${SVC}) + HMENU+=(${SVC} "on") + log_dbg "Found '${SVC}' running" + break; + fi; + done; + done; + if [[ ${OSTYPE} == "linux-gnu" ]] || [[ ${OSTYPE} == "linux-gnueabihf" ]]; then + DETECTED_SERVICES+=("linux") + HMENU+=("linux" "on") + else + log_info "NOT A LINUX" + fi; + + if [[ ${SILENT} == "false" ]]; then + #we put whiptail results in an array, notice the dark magic fd redirection + DETECTED_SERVICES=($(whiptail --separate-output --noitem --ok-button Continue --title "Services to monitor" --checklist "Detected services, uncheck to ignore. Ignored services won't be monitored." 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + if [ $? 
-eq 1 ]; then + log_err "user bailed out at services selection" + exit 1; + fi; + log_dbg "Detected services (interactive) : ${DETECTED_SERVICES[@]}" + else + log_dbg "Detected services (unattended) : ${DETECTED_SERVICES[@]}" + fi; +} + +declare -A log_input_tags +log_input_tags[apache2]='type: apache2' +log_input_tags[nginx]='type: nginx' +log_input_tags[sshd]='type: syslog' +log_input_tags[rsyslog]='type: syslog' +log_input_tags[telnet]='type: telnet' +log_input_tags[mysql]='type: mysql' +log_input_tags[smb]='type: smb' +log_input_tags[linux]="type: syslog" + +declare -A log_locations +log_locations[apache2]='/var/log/apache2/*.log,/var/log/*httpd*.log,/var/log/httpd/*log' +log_locations[nginx]='/var/log/nginx/*.log,/usr/local/openresty/nginx/logs/*.log' +log_locations[sshd]='/var/log/auth.log,/var/log/sshd.log,/var/log/secure' +log_locations[rsyslog]='/var/log/syslog' +log_locations[telnet]='/var/log/telnetd*.log' +log_locations[mysql]='/var/log/mysql/error.log' +log_locations[smb]='/var/log/samba*.log' +log_locations[linux]='/var/log/syslog,/var/log/kern.log,/var/log/messages' + +#$1 is service name, such those in SUPPORTED_SERVICES +find_logs_for() { + ret="" + x=${1} + #we have trailing and starting quotes because of whiptail + SVC="${x%\"}" + SVC="${SVC#\"}" + DETECTED_LOGFILES=() + HMENU=() + #log_info "Searching logs for ${SVC} : ${log_locations[${SVC}]}" + + #split the line into an array with ',' separator + OIFS=${IFS} + IFS=',' read -r -a a <<< "${log_locations[${SVC}]}," + IFS=${OIFS} + #readarray -td, a <<<"${log_locations[${SVC}]},"; unset 'a[-1]'; + for poss_path in "${a[@]}"; do + #Split /var/log/nginx/*.log into '/var/log/nginx' and '*.log' so we can use find + path=${poss_path%/*} + fname=${poss_path##*/} + candidates=`find "${path}" -type f -mtime -5 -ctime -5 -name "$fname"` + #We have some candidates, add them + for final_file in ${candidates} ; do + log_dbg "Found logs file for '${SVC}': ${final_file}" + DETECTED_LOGFILES+=(${final_file}) + HMENU+=(${final_file} "on") + done; + done; + + if [[ ${SILENT} == "false" ]]; then + DETECTED_LOGFILES=($(whiptail --separate-output --noitem --ok-button Continue --title "Log files to process for ${SVC}" --checklist "Detected logfiles for ${SVC}, uncheck to ignore" 18 70 10 ${HMENU[@]} 3>&1 1>&2 2>&3)) + if [ $? -eq 1 ]; then + log_err "user bailed out at log file selection" + exit 1; + fi; + fi +} + +in_array() { + str=$1 + shift + array=("$@") + for element in "${array[@]}"; do + if [[ ${str} == crowdsecurity/${element} ]]; then + return 0 + fi + done + return 1 +} + +install_collection() { + HMENU=() + readarray -t AVAILABLE_COLLECTION < <(${CSCLI_BIN_INSTALLED} collections list -o raw -a) + COLLECTION_TO_INSTALL=() + for collect_info in "${AVAILABLE_COLLECTION[@]:1}"; do + collection="$(echo ${collect_info} | cut -d "," -f1)" + description="$(echo ${collect_info} | cut -d "," -f4)" + in_array $collection "${DETECTED_SERVICES[@]}" + if [[ $? 
== 0 ]]; then + HMENU+=("${collection}" "${description}" "ON") + #in case we're not in interactive mode, assume defaults + COLLECTION_TO_INSTALL+=(${collection}) + else + if [[ ${collection} == "linux" ]]; then + HMENU+=("${collection}" "${description}" "ON") + #in case we're not in interactive mode, assume defaults + COLLECTION_TO_INSTALL+=(${collection}) + else + HMENU+=("${collection}" "${description}" "OFF") + fi + fi + done + + if [[ ${SILENT} == "false" ]]; then + COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) + if [ $? -eq 1 ]; then + log_err "user bailed out at collection selection" + exit 1; + fi; + fi; + + for collection in "${COLLECTION_TO_INSTALL[@]}"; do + log_info "Installing collection '${collection}'" + ${CSCLI_BIN_INSTALLED} collections install "${collection}" > /dev/null 2>&1 || log_err "fail to install collection ${collection}" + done + + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" > /dev/null 2>&1 || log_err "fail to install collection crowdsec/whitelists" + if [[ ${SILENT} == "false" ]]; then + whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP addresses from being banned, feel free to remove it any time." 20 50 + fi + + if [[ ${SILENT} == "false" ]]; then + whiptail --msgbox "CrowdSec alone will not block any IP address. If you want to block them, you must use a bouncer. You can find them on https://hub.crowdsec.net/browse/#bouncers" 20 50 + fi +} + +#$1 is the service name, $... is the list of candidate logs (from find_logs_for) +genyamllog() { + local service="${1}" + shift + local files=("${@}") + + echo "#Generated acquisition file - wizard.sh (service: ${service}) / files : ${files[@]}" >> ${TMP_ACQUIS_FILE} + + echo "filenames:" >> ${TMP_ACQUIS_FILE} + for fd in ${files[@]}; do + echo " - ${fd}" >> ${TMP_ACQUIS_FILE} + done + echo "labels:" >> ${TMP_ACQUIS_FILE} + echo " "${log_input_tags[${service}]} >> ${TMP_ACQUIS_FILE} + echo "---" >> ${TMP_ACQUIS_FILE} + log_dbg "${ACQUIS_FILE_MSG}" +} + +genyamljournal() { + local service="${1}" + shift + + echo "#Generated acquisition file - wizard.sh (service: ${service}) / files : ${files[@]}" >> ${TMP_ACQUIS_FILE} + + echo "journalctl_filter:" >> ${TMP_ACQUIS_FILE} + echo " - _SYSTEMD_UNIT="${service}".service" >> ${TMP_ACQUIS_FILE} + echo "labels:" >> ${TMP_ACQUIS_FILE} + echo " "${log_input_tags[${service}]} >> ${TMP_ACQUIS_FILE} + echo "---" >> ${TMP_ACQUIS_FILE} + log_dbg "${ACQUIS_FILE_MSG}" +} + +genacquisition() { + if skip_tmp_acquis; then + TMP_ACQUIS_FILE="${ACQUIS_TARGET}" + ACQUIS_FILE_MSG="acquisition file generated to: ${TMP_ACQUIS_FILE}" + else + TMP_ACQUIS_FILE="tmp-acquis.yaml" + ACQUIS_FILE_MSG="tmp acquisition file generated to: ${TMP_ACQUIS_FILE}" + fi + + log_dbg "Found following services : "${DETECTED_SERVICES[@]} + for PSVG in ${DETECTED_SERVICES[@]} ; do + find_logs_for ${PSVG} + if [[ ${#DETECTED_LOGFILES[@]} -gt 0 ]] ; then + log_info "service '${PSVG}': ${DETECTED_LOGFILES[*]}" + genyamllog ${PSVG} ${DETECTED_LOGFILES[@]} + elif [[ ${PSVG} != "linux" ]] ; then + log_info "using journald for '${PSVG}'" + genyamljournal ${PSVG} + fi; + done +} + +detect_cs_install () { + if [[ -f "$CROWDSEC_BIN_INSTALLED" ]]; then + log_warn "Crowdsec is already 
installed !" + echo "" + echo "We recommend to upgrade : sudo ./wizard.sh --upgrade " + echo "If you want to install it anyway, please use '--force'." + echo "" + echo "Run : sudo ./wizard.sh -i --force" + if [[ ${FORCE_MODE} == "false" ]]; then + exit 1 + fi + fi +} + +check_cs_version () { + CURRENT_CS_VERSION=$(crowdsec -version 2>&1 | grep version | grep -Eio 'v[0-9]+.[0-9]+.[0-9]+' | cut -c 2-) + NEW_CS_VERSION=$($CROWDSEC_BIN -version 2>&1 | grep version | grep -Eio 'v[0-9]+.[0-9]+.[0-9]+' | cut -c 2-) + CURRENT_MAJOR_VERSION=$(echo $CURRENT_CS_VERSION | cut -d'.' -f1) + CURRENT_MINOR_VERSION=$(echo $CURRENT_CS_VERSION | cut -d'.' -f2) + CURRENT_PATCH_VERSION=$(echo $CURRENT_CS_VERSION | cut -d'.' -f3) + NEW_MAJOR_VERSION=$(echo $NEW_CS_VERSION | cut -d'.' -f1) + NEW_MINOR_VERSION=$(echo $NEW_CS_VERSION | cut -d'.' -f2) + NEW_PATCH_VERSION=$(echo $NEW_CS_VERSION | cut -d'.' -f3) + + if [[ $NEW_MAJOR_VERSION -gt $CURRENT_MAJOR_VERSION ]]; then + if [[ ${FORCE_MODE} == "false" ]]; then + log_warn "new version ($NEW_CS_VERSION) is a major, you should follow documentation to upgrade !" + echo "" + exit 1 + fi + elif [[ $NEW_MINOR_VERSION -gt $CURRENT_MINOR_VERSION ]] ; then + log_warn "new version ($NEW_CS_VERSION) is a minor upgrade !" + if [[ $ACTION != "upgrade" ]] ; then + if [[ ${FORCE_MODE} == "false" ]]; then + echo "" + echo "We recommend to upgrade with : sudo ./wizard.sh --upgrade " + echo "If you want to $ACTION anyway, please use '--force'." + echo "" + echo "Run : sudo ./wizard.sh --$ACTION --force" + exit 1 + fi + fi + elif [[ $NEW_PATCH_VERSION -gt $CURRENT_PATCH_VERSION ]] ; then + log_warn "new version ($NEW_CS_VERSION) is a patch !" + if [[ $ACTION != "binupgrade" ]] ; then + if [[ ${FORCE_MODE} == "false" ]]; then + echo "" + echo "We recommend to upgrade binaries only : sudo ./wizard.sh --binupgrade " + echo "If you want to $ACTION anyway, please use '--force'." + echo "" + echo "Run : sudo ./wizard.sh --$ACTION --force" + exit 1 + fi + fi + elif [[ $NEW_MINOR_VERSION -eq $CURRENT_MINOR_VERSION ]]; then + log_warn "new version ($NEW_CS_VERSION) is same as current version ($CURRENT_CS_VERSION) !" + if [[ ${FORCE_MODE} == "false" ]]; then + echo "" + echo "We recommend to $ACTION only if it's an higher version. " + echo "If it's an RC version (vX.X.X-rc) you can upgrade it using '--force'." 
+ echo "" + echo "Run : sudo ./wizard.sh --$ACTION --force" + exit 1 + fi + fi +} + +#install crowdsec and cscli +install_crowdsec() { + mkdir -p "${CROWDSEC_DATA_DIR}" + (cd config && find patterns -type f -exec install -Dm 644 "{}" "${CROWDSEC_CONFIG_PATH}/{}" \; && cd ../) || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/scenarios" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/postoverflows" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/collections" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/patterns" || exit + + #tmp + mkdir -p /tmp/data + mkdir -p /etc/crowdsec/hub/ + install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + + ## end tmp + + install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + install -v -m 644 -D ./config/"${CONSOLE_FILE}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit + + DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file" + if [[ ${DOCKER_MODE} == "false" ]]; then + CFG=${CROWDSEC_CONFIG_PATH} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" || log_fatal "unable to crowdsec systemd file" + fi + install_bins + + if [[ ${DOCKER_MODE} == "false" ]]; then + systemctl daemon-reload + fi +} + +update_bins() { + log_info "Only upgrading binaries" + delete_bins + install_bins + log_info "Upgrade finished" + systemctl restart crowdsec || log_fatal "unable to restart crowdsec with systemctl" +} + +update_full() { + + if [[ ! -f "$CROWDSEC_BIN" ]]; then + log_err "Crowdsec binary '$CROWDSEC_BIN' not found. Please build it with 'make build'" && exit + fi + if [[ ! -f "$CSCLI_BIN" ]]; then + log_err "Cscli binary '$CSCLI_BIN' not found. 
Please build it with 'make build'" && exit + fi + + log_info "Backing up existing configuration" + ${CSCLI_BIN_INSTALLED} config backup ${BACKUP_DIR} + log_info "Saving default database content if exist" + if [[ -f "/var/lib/crowdsec/data/crowdsec.db" ]]; then + cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db + fi + log_info "Cleanup existing crowdsec configuration" + uninstall_crowdsec + log_info "Installing crowdsec" + install_crowdsec + log_info "Restoring configuration" + ${CSCLI_BIN_INSTALLED} hub update + ${CSCLI_BIN_INSTALLED} config restore ${BACKUP_DIR} + log_info "Restoring saved database if exist" + if [[ -f "${BACKUP_DIR}/crowdsec.db" ]]; then + cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db + fi + log_info "Finished, restarting" + systemctl restart crowdsec || log_fatal "Failed to restart crowdsec" +} + +install_bins() { + log_dbg "Installing crowdsec binaries" + install -v -m 755 -D "${CROWDSEC_BIN}" "${CROWDSEC_BIN_INSTALLED}" 1> /dev/null || exit + install -v -m 755 -D "${CSCLI_BIN}" "${CSCLI_BIN_INSTALLED}" 1> /dev/null || exit + which systemctl && systemctl is-active --quiet crowdsec + if [ $? -eq 0 ]; then + systemctl stop crowdsec + fi + install_plugins + symlink_bins +} + +symlink_bins() { + if grep -q "${BIN_INSTALL_PATH}" <<< $PATH; then + log_dbg "${BIN_INSTALL_PATH} found in PATH" + else + ln -s "${CSCLI_BIN_INSTALLED}" /usr/bin/cscli + ln -s "${CROWDSEC_BIN_INSTALLED}" /usr/bin/crowdsec + fi +} + +delete_bins() { + log_info "Removing crowdsec binaries" + rm -f ${CROWDSEC_BIN_INSTALLED} + rm -f ${CSCLI_BIN_INSTALLED} +} + +delete_plugins() { + rm -rf ${CROWDSEC_PLUGIN_DIR} +} + +install_plugins(){ + mkdir -p ${CROWDSEC_PLUGIN_DIR} + mkdir -p /etc/crowdsec/notifications + + cp ${SLACK_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + cp ${SPLUNK_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + cp ${HTTP_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + cp ${EMAIL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + + if [[ ${DOCKER_MODE} == "false" ]]; then + cp -n ${SLACK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + cp -n ${SPLUNK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + cp -n ${HTTP_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + cp -n ${EMAIL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + fi +} + +check_running_bouncers() { + #when uninstalling, check if user still has bouncers + BOUNCERS_COUNT=$(${CSCLI_BIN} bouncers list -o=raw | tail -n +2 | wc -l) + if [[ ${BOUNCERS_COUNT} -gt 0 ]] ; then + if [[ ${FORCE_MODE} == "false" ]]; then + echo "WARNING : You have at least one bouncer registered (cscli bouncers list)." + echo "WARNING : Uninstalling crowdsec with a running bouncer will let it in an unpredictable state." + echo "WARNING : If you want to uninstall crowdsec, you should first uninstall the bouncers." + echo "Specify --force to bypass this restriction." 
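+        # at least one bouncer is registered and --force was not given: refuse to uninstall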
+ exit 1 + fi; + fi +} + +# uninstall crowdsec and cscli +uninstall_crowdsec() { + systemctl stop crowdsec.service 1>/dev/null + systemctl disable -q crowdsec.service 1>/dev/null + ${CSCLI_BIN} dashboard remove -f -y >/dev/null + delete_bins + + # tmp + rm -rf /tmp/data/ + ## end tmp + + find /etc/crowdsec -maxdepth 1 -mindepth 1 | grep -v "bouncer" | xargs rm -rf || echo "" + rm -f ${CROWDSEC_LOG_FILE} || echo "" + rm -f ${LAPI_LOG_FILE} || echo "" + rm -f ${CROWDSEC_DB_PATH} || echo "" + rm -rf ${CROWDSEC_LIB_DIR} || echo "" + rm -rf ${CROWDSEC_USR_DIR} || echo "" + rm -f ${SYSTEMD_PATH_FILE} || echo "" + log_info "crowdsec successfully uninstalled" +} + + +function show_link { + echo "" + echo "Useful links to start with Crowdsec:" + echo "" + echo " - Documentation : https://doc.crowdsec.net/docs/getting_started/crowdsec_tour" + echo " - Crowdsec Hub : https://hub.crowdsec.net/ " + echo " - Open issues : https://github.com/crowdsecurity/crowdsec/issues" + echo "" + echo "Useful commands to start with Crowdsec:" + echo "" + echo " - sudo cscli metrics : https://doc.crowdsec.net/docs/observability/cscli" + echo " - sudo cscli decisions list : https://doc.crowdsec.net/docs/user_guides/decisions_mgmt" + echo " - sudo cscli hub list : https://doc.crowdsec.net/docs/user_guides/hub_mgmt" + echo "" + echo "Next step: visualize all your alerts and explore our community CTI : https://app.crowdsec.net" + echo "" +} + +main() { + + if [ "$1" == "install" ] || [ "$1" == "configure" ] || [ "$1" == "detect" ]; then + if [ "${SILENT}" == "false" ]; then + which whiptail > /dev/null + if [ $? -ne 0 ]; then + log_fatal "whiptail binary is needed to use the wizard in interactive mode, exiting ..." + fi + fi + which envsubst > /dev/null + if [ $? -ne 0 ]; then + log_fatal "envsubst binary is needed to use do a full install with the wizard, exiting ..." + fi + fi + + if [[ "$1" == "binupgrade" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + check_cs_version + update_bins + return + fi + + if [[ "$1" == "upgrade" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + check_cs_version + update_full + return + fi + + if [[ "$1" == "configure" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + detect_services + ${CSCLI_BIN_INSTALLED} hub update + install_collection + genacquisition + if ! skip_tmp_acquis; then + mv "${TMP_ACQUIS_FILE}" "${ACQUIS_TARGET}" + fi + + return + fi + + if [[ "$1" == "noop" ]]; + then + return + fi + + if [[ "$1" == "uninstall" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + check_running_bouncers + uninstall_crowdsec + return + fi + + if [[ "$1" == "bininstall" ]]; + then + if ! [ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + log_info "checking existing crowdsec install" + detect_cs_install + log_info "installing crowdsec" + install_crowdsec + + show_link + return + fi + + if [[ "$1" == "install" ]]; + then + if ! 
[ $(id -u) = 0 ]; then + log_err "Please run the wizard as root or with sudo" + exit 1 + fi + log_info "checking if crowdsec is installed" + detect_cs_install + ## Do make build before installing (as non--root) in order to have the binary and then install crowdsec as root + log_info "installing crowdsec" + install_crowdsec + log_dbg "configuring ${CSCLI_BIN_INSTALLED}" + ${CSCLI_BIN_INSTALLED} hub update > /dev/null 2>&1 || (log_err "fail to update crowdsec hub. exiting" && exit 1) + + # detect running services + detect_services + if ! [ ${#DETECTED_SERVICES[@]} -gt 0 ] ; then + log_err "No detected or selected services, stopping." + exit 1 + fi; + + # Generate acquisition file and move it to the right folder + genacquisition + if ! skip_tmp_acquis; then + mv "${TMP_ACQUIS_FILE}" "${ACQUIS_TARGET}" + fi + log_info "acquisition file path: ${ACQUIS_TARGET}" + # Install collections according to detected services + log_dbg "Installing needed collections ..." + install_collection + + # install patterns/ folder + log_dbg "Installing patterns" + mkdir -p "${PATTERNS_PATH}" + cp "./${PATTERNS_FOLDER}/"* "${PATTERNS_PATH}/" + + # api register + ${CSCLI_BIN_INSTALLED} machines add --force "$(cat /etc/machine-id)" -a -f "${CROWDSEC_CONFIG_PATH}/${CLIENT_SECRETS}" || log_fatal "unable to add machine to the local API" + log_dbg "Crowdsec LAPI registered" + + ${CSCLI_BIN_INSTALLED} capi register || log_fatal "unable to register to the Central API" + log_dbg "Crowdsec CAPI registered" + + systemctl enable -q crowdsec >/dev/null || log_fatal "unable to enable crowdsec" + systemctl start crowdsec >/dev/null || log_fatal "unable to start crowdsec" + log_info "enabling and starting crowdsec daemon" + show_link + return + fi + + if [[ "$1" == "detect" ]]; + then + if ! skip_tmp_acquis; then + rm -f "${TMP_ACQUIS_FILE}" + fi + detect_services + if [[ ${DETECTED_SERVICES} == "" ]] ; then + log_err "No detected or selected services, stopping." + exit + fi; + log_info "Found ${#DETECTED_SERVICES[@]} supported services running:" + genacquisition + cat "${TMP_ACQUIS_FILE}" + if ! skip_tmp_acquis; then + rm "${TMP_ACQUIS_FILE}" + fi + return + fi + +} + +usage() { + echo "Usage:" + echo " ./wizard.sh -h Display this help message." + echo " ./wizard.sh -d|--detect Detect running services and associated logs file" + echo " ./wizard.sh -i|--install Assisted installation of crowdsec/cscli and collections" + echo " ./wizard.sh --bininstall Install binaries and empty config, no wizard." 
+ echo " ./wizard.sh --uninstall Uninstall crowdsec/cscli" + echo " ./wizard.sh --binupgrade Upgrade crowdsec/cscli binaries" + echo " ./wizard.sh --upgrade Perform a full upgrade and try to migrate configs" + echo " ./wizard.sh --unattended Install in unattended mode, no question will be asked and defaults will be followed" + echo " ./wizard.sh --docker-mode Will install crowdsec without systemd and generate random machine-id" + echo " ./wizard.sh -n|--noop Do nothing" + + exit 0 +} + +if [[ $# -eq 0 ]]; then +usage +fi + +while [[ $# -gt 0 ]] +do + key="${1}" + case ${key} in + --uninstall) + ACTION="uninstall" + shift #past argument + ;; + --binupgrade) + ACTION="binupgrade" + shift #past argument + ;; + --upgrade) + ACTION="upgrade" + shift #past argument + ;; + -i|--install) + ACTION="install" + shift # past argument + ;; + --bininstall) + ACTION="bininstall" + shift # past argument + ;; + --docker-mode) + DOCKER_MODE="true" + ACTION="bininstall" + shift # past argument + ;; + -c|--configure) + ACTION="configure" + shift # past argument + ;; + -d|--detect) + ACTION="detect" + shift # past argument + ;; + -n|--noop) + ACTION="noop" + shift # past argument + ;; + --unattended) + SILENT="true" + ACTION="install" + shift + ;; + -f|--force) + FORCE_MODE="true" + shift + ;; + -v|--verbose) + DEBUG_MODE="true" + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + log_err "Unknown argument ${key}." + usage + exit 1 + ;; + esac +done + +main ${ACTION} -- 2.30.2