From 20a11167d17ba929bff588c758dd76dd404b0255 Mon Sep 17 00:00:00 2001 From: nandinivij <61885842+nandinivij@users.noreply.github.com> Date: Wed, 29 Jul 2020 15:54:35 -0700 Subject: [PATCH 1/5] Troubleshooting/problem >solution (#593) * Troubleshooting guide format problem solution * Minor chnages * minor changes --- docs/troubleshooting.md | 80 ++++++++++++++++++++++++++++++----------- 1 file changed, 60 insertions(+), 20 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index e14b1ae..d32bc91 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,25 +1,23 @@ #Troubleshooting -## Startup - -Most issues that occur with startup and operation of sc4s typically involve syntax errors or duplicate listening ports. If you are -running out of systemd, you may see this at startup: +## Not getting starting up message in Splunk! +### Are there any syntax errors or duplicate listening ports? +Check for errors with the following command. ```bash [root@sc4s syslog-ng]# systemctl start sc4s Job for sc4s.service failed because the control process exited with error code. See "systemctl status sc4s.service" and "journalctl -xe" for details. ``` -In other cases, there may be nothing untoward after starting with systemd, but the container is not running at all -after checking with `podman logs SC4S` or `podman ps`. A more informative command than `journalctl -xe` is the following, +### Is your container running? + Check using `podman logs SC4S` or `podman ps`. A more informative command than `journalctl -xe` is the following, ``` journalctl -b -u sc4s | tail -100 ``` which will print the last 100 lines of the system journal in far more detail, which should be sufficient to see the specific failure (syntax or runtime) and guide you in troubleshooting why the container exited unexpectedly. 
-As an alternative to launching via systemd during the initial installation phase, you may wish to test the container startup outside of the -systemd startup environment. The following commmand will launch the container directly from the CLI. This command assumes the local mounted -directories are set up as shown in the "getting started" examples: +### Is your container working outside of the systemd startup environment? +The following command will launch the container directly from the CLI. This command assumes the local mounted directories are set up as shown in the "getting started" examples: ```bash /usr/bin/podman run -p 514:514 -p 514:514/udp -p 6514:6514 -p 5000-5020:5000-5020 -p 5000-5020:5000-5020/udp \ @@ -33,7 +31,7 @@ directories are set up as shown in the "getting started" examples: If you are using docker, substitute "docker" for "podman" for the container runtime command above. -### Stale Containers (podman) +### Are there any stale containers in your environment? (podman) In rare instances, (especially when starting/stopping often) an SC4S container might not shut down completely when using podman, leaving a "stale" container behind that is denoted by a very long ID string. You will see this type of output when viewing the journal after a failed @@ -51,16 +49,7 @@ podman rm -f 894357502b2a7142d097ea3ca1468d1cb4fbc69959a9817a1bbe145a09d37fb9 replacing the long string with whatever container ID is shown in your error message. SC4S should then start normally. -## Verification of TLS Server - -To verify the correct configuration of the TLS server use the following command. Use `podman` or `docker` and replace the IP, FQDN, -and port as appropriate: - -```bash - run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 -``` - -## Validating HEC/token issues (AKA "No data in Splunk") +### Are there any HEC errors in the podman logs? SC4S performs basic HEC connectivity and index checks at startup. 
These indicate general connection issues and indexes that may not be accesible and/or configured on the Splunk side. To check the container logs which contain the results of these tests, run: @@ -93,6 +82,57 @@ A `400` error (not 404) is normally caused by an index that has not been created just _one_ bad index will "taint" the entire batch (in this case, 1000 events) and prevent _any_ of them from being sent to Splunk. _It is imperative that the container logs be free of these kinds of errors in production._ +### Is SC4S server out of space? +Check the connection to Splunk. If the connection is lost for a long period it will lead to increase in disk space due to disk buffer backup. +Adjust the size of the disk buffer using env_file. [Disk buffer configuration](https://splunk-connect-for-syslog.readthedocs.io/en/master/configuration/#disk-buffer-variables) + +Check env_file if `SC4S_DEST_GLOBAL_ALTERNATES=d_hec_debug` is enabled and hence archive is consuming disk space. + +Check the method consuming disk space use `df -h` and then `du -sh *` to find out what's causing it. + +Try rebuilding sc4s volume. +``` +podman volume rm splunk-sc4s-var +podman volume create splunk-sc4s-var +``` +Try pruning containers +``` +podman system prune +``` + +### Are there any kernel memory warnings? + +```bash +/usr/bin/ logs SC4S +``` +Note the output. The following warning message is not a failure condition unless we are reaching the upper limit of hardware performance. +``` +The kernel refused to set the receive buffer (SO_RCVBUF) to the requested size, you probably need to adjust buffer related kernel parameters; so_rcvbuf='1703936', so_rcvbuf_set='425984' +``` +Make changes to /etc/sysctl.conf. Changing receive buffer values here to 16 MB: + +``` +net.core.rmem_default = 1703936 +net.core.rmem_max = 1703936. +``` +Run following commands for changes to be affected. +``` +sysctl -p restart SC4S +``` + +## Are there events with incorrect timezone? 
+By default, SC4S resolves the timezone to GMT. If customer have a preference to use local TZ then set the user TZ preference in Splunk during search time rather than at index time. +[Timezone config documentation](https://docs.splunk.com/Documentation/Splunk/8.0.4/Data/ApplyTimezoneOffsetstotimestamps) + +## Verification of TLS Server + +To verify the correct configuration of the TLS server use the following command. Use `podman` or `docker` and replace the IP, FQDN, +and port as appropriate: + +```bash + run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 +``` + ## Enabling the Alternate Debug Destination To help debug why the `400` errors are ocurring, it is helpful to enable an alternate destination for syslog traffic that will write From 1ba2e4dcd4dff580279b25f1e7b0aaf056b45cb4 Mon Sep 17 00:00:00 2001 From: Ryan Faircloth <35384120+rfaircloth-splunk@users.noreply.github.com> Date: Wed, 29 Jul 2020 19:31:23 -0400 Subject: [PATCH 2/5] Revert "Troubleshooting/problem >solution (#593)" (#597) This reverts commit 20a11167d17ba929bff588c758dd76dd404b0255. --- docs/troubleshooting.md | 80 +++++++++++------------------------------ 1 file changed, 20 insertions(+), 60 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d32bc91..e14b1ae 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,23 +1,25 @@ #Troubleshooting -## Not getting starting up message in Splunk! +## Startup + +Most issues that occur with startup and operation of sc4s typically involve syntax errors or duplicate listening ports. If you are +running out of systemd, you may see this at startup: -### Are there any syntax errors or duplicate listening ports? -Check for errors with the following command. ```bash [root@sc4s syslog-ng]# systemctl start sc4s Job for sc4s.service failed because the control process exited with error code. See "systemctl status sc4s.service" and "journalctl -xe" for details. 
``` -### Is your container running? - Check using `podman logs SC4S` or `podman ps`. A more informative command than `journalctl -xe` is the following, +In other cases, there may be nothing untoward after starting with systemd, but the container is not running at all +after checking with `podman logs SC4S` or `podman ps`. A more informative command than `journalctl -xe` is the following, ``` journalctl -b -u sc4s | tail -100 ``` which will print the last 100 lines of the system journal in far more detail, which should be sufficient to see the specific failure (syntax or runtime) and guide you in troubleshooting why the container exited unexpectedly. -### Is your container working outside of the systemd startup environment? -The following command will launch the container directly from the CLI. This command assumes the local mounted directories are set up as shown in the "getting started" examples: +As an alternative to launching via systemd during the initial installation phase, you may wish to test the container startup outside of the +systemd startup environment. The following commmand will launch the container directly from the CLI. This command assumes the local mounted +directories are set up as shown in the "getting started" examples: ```bash /usr/bin/podman run -p 514:514 -p 514:514/udp -p 6514:6514 -p 5000-5020:5000-5020 -p 5000-5020:5000-5020/udp \ @@ -31,7 +33,7 @@ The following command will launch the container directly from the CLI. This com If you are using docker, substitute "docker" for "podman" for the container runtime command above. -### Are there any stale containers in your environment? (podman) +### Stale Containers (podman) In rare instances, (especially when starting/stopping often) an SC4S container might not shut down completely when using podman, leaving a "stale" container behind that is denoted by a very long ID string. 
You will see this type of output when viewing the journal after a failed @@ -49,7 +51,16 @@ podman rm -f 894357502b2a7142d097ea3ca1468d1cb4fbc69959a9817a1bbe145a09d37fb9 replacing the long string with whatever container ID is shown in your error message. SC4S should then start normally. -### Are there any HEC errors in the podman logs? +## Verification of TLS Server + +To verify the correct configuration of the TLS server use the following command. Use `podman` or `docker` and replace the IP, FQDN, +and port as appropriate: + +```bash + run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 +``` + +## Validating HEC/token issues (AKA "No data in Splunk") SC4S performs basic HEC connectivity and index checks at startup. These indicate general connection issues and indexes that may not be accesible and/or configured on the Splunk side. To check the container logs which contain the results of these tests, run: @@ -82,57 +93,6 @@ A `400` error (not 404) is normally caused by an index that has not been created just _one_ bad index will "taint" the entire batch (in this case, 1000 events) and prevent _any_ of them from being sent to Splunk. _It is imperative that the container logs be free of these kinds of errors in production._ -### Is SC4S server out of space? -Check the connection to Splunk. If the connection is lost for a long period it will lead to increase in disk space due to disk buffer backup. -Adjust the size of the disk buffer using env_file. [Disk buffer configuration](https://splunk-connect-for-syslog.readthedocs.io/en/master/configuration/#disk-buffer-variables) - -Check env_file if `SC4S_DEST_GLOBAL_ALTERNATES=d_hec_debug` is enabled and hence archive is consuming disk space. - -Check the method consuming disk space use `df -h` and then `du -sh *` to find out what's causing it. - -Try rebuilding sc4s volume. 
-``` -podman volume rm splunk-sc4s-var -podman volume create splunk-sc4s-var -``` -Try pruning containers -``` -podman system prune -``` - -### Are there any kernel memory warnings? - -```bash -/usr/bin/ logs SC4S -``` -Note the output. The following warning message is not a failure condition unless we are reaching the upper limit of hardware performance. -``` -The kernel refused to set the receive buffer (SO_RCVBUF) to the requested size, you probably need to adjust buffer related kernel parameters; so_rcvbuf='1703936', so_rcvbuf_set='425984' -``` -Make changes to /etc/sysctl.conf. Changing receive buffer values here to 16 MB: - -``` -net.core.rmem_default = 1703936 -net.core.rmem_max = 1703936. -``` -Run following commands for changes to be affected. -``` -sysctl -p restart SC4S -``` - -## Are there events with incorrect timezone? -By default, SC4S resolves the timezone to GMT. If customer have a preference to use local TZ then set the user TZ preference in Splunk during search time rather than at index time. -[Timezone config documentation](https://docs.splunk.com/Documentation/Splunk/8.0.4/Data/ApplyTimezoneOffsetstotimestamps) - -## Verification of TLS Server - -To verify the correct configuration of the TLS server use the following command. 
Use `podman` or `docker` and replace the IP, FQDN, -and port as appropriate: - -```bash - run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 -``` - ## Enabling the Alternate Debug Destination To help debug why the `400` errors are ocurring, it is helpful to enable an alternate destination for syslog traffic that will write From f79c3681be54055953bdaec0f7dcc0ede400ebe0 Mon Sep 17 00:00:00 2001 From: Ryan Faircloth <35384120+rfaircloth-splunk@users.noreply.github.com> Date: Thu, 30 Jul 2020 11:52:23 -0400 Subject: [PATCH 3/5] Manual Merge (#599) * [filterchange] Cisco Nexus (#594) Adjust airOS and Nexsus to address event with no HOST but with TZ indicator * [doc] Palo filter does not support UDP due to limitations of message size (#591) * [filteradd] Carbon Black Protection CEF format (#590) * [fix] noise in SC4S logs from goss (#595) * Troubleshooting/problem >solution (#593) (#596) * Troubleshooting/problem >solution (#593) * Troubleshooting guide format problem solution * Minor chnages * minor changes * Revert "Troubleshooting/problem >solution (#593)" (#597) This reverts commit 20a11167d17ba929bff588c758dd76dd404b0255. Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> --- docs/gettingstarted/index.md | 2 + docs/sources/PaloaltoNetworks/index.md | 1 - docs/sources/VMWare/index.md | 56 +++++++++++++++++++ .../conf.d/filters/cisco/cisco_syslog.conf | 9 +-- .../splunk_metadata.csv.example | 4 +- package/sbin/entrypoint.sh | 2 +- tests/test_cisco_ios.py | 1 + 7 files changed, 65 insertions(+), 10 deletions(-) diff --git a/docs/gettingstarted/index.md b/docs/gettingstarted/index.md index ecc06ec..7eb766c 100644 --- a/docs/gettingstarted/index.md +++ b/docs/gettingstarted/index.md @@ -37,6 +37,7 @@ using the SC4S defaults. 
SC4S can be easily customized to use different indexes * email * epav +* epintel * netauth * netdlp * netdns @@ -46,6 +47,7 @@ using the SC4S defaults. SC4S can be easily customized to use different indexes * netwaf * netproxy * netipam +* oswin * oswinsec * osnix * em_metrics (Optional opt-in for SC4S operational metrics; ensure this is created as a metrics index) diff --git a/docs/sources/PaloaltoNetworks/index.md b/docs/sources/PaloaltoNetworks/index.md index f57e6c6..ac124d1 100644 --- a/docs/sources/PaloaltoNetworks/index.md +++ b/docs/sources/PaloaltoNetworks/index.md @@ -50,7 +50,6 @@ MSG Parse: This filter parses message content | Variable | default | description | |----------------|----------------|----------------| | SC4S_LISTEN_PALOALTO_PANOS_TCP_PORT | empty string | Enable a TCP port for this specific vendor product using a comma-separated list of port numbers | -| SC4S_LISTEN_PALOALTO_PANOS_UDP_PORT | empty string | Enable a UDP port for this specific vendor product using a comma-separated list of port numbers | | SC4S_ARCHIVE_PALOALTO_PANOS | no | Enable archive to disk for this specific source | | SC4S_DEST_PALOALTO_PANOS_HEC | no | When Splunk HEC is disabled globally set to yes to enable this specific source | diff --git a/docs/sources/VMWare/index.md b/docs/sources/VMWare/index.md index 42a7dc0..c96ecd4 100644 --- a/docs/sources/VMWare/index.md +++ b/docs/sources/VMWare/index.md @@ -1,5 +1,61 @@ # Vendor - Dell - VMware +## Product - Carbon Black Protection + +| Ref | Link | +|----------------|---------------------------------------------------------------------------------------------------------| +| Splunk Add-on CEF | none | +| Splunk Add-on Source Specific | https://bitbucket.org/SPLServices/ta-cef-imperva-incapsula/downloads/ | + + +### Sourcetypes + +| sourcetype | notes | +|----------------|---------------------------------------------------------------------------------------------------------| +| cef | Common sourcetype | + +### Source + 
+| source | notes | +|----------------|---------------------------------------------------------------------------------------------------------| +| carbonblack:protection:cef | Note this method of onboarding is not recommended for a more complete experience utilize the json format supported by he product with hec or s3 | + +### Index Configuration + +| key | source | index | notes | +|----------------|----------------|----------------|----------------| +| Carbon Black_Protection | carbonblack:protection:cef | epintel | none | + +### Filter type + +MSG Parse: This filter parses message content + +### Options + +Note listed for reference processing utilizes the Microsoft ArcSight log path as this format is a subtype of CEF + +| Variable | default | description | +|----------------|----------------|----------------| +| SC4S_LISTEN_CEF_TCP_PORT | empty string | Enable a TCP port for this specific vendor product using a comma-separated list of port numbers | +| SC4S_LISTEN_CEF_UDP_PORT | empty string | Enable a UDP port for this specific vendor product using a comma-separated list of port numbers | +| SC4S_ARCHIVE_CEF | no | Enable archive to disk for this specific source | +| SC4S_DEST_CEF_HEC | no | When Splunk HEC is disabled globally set to yes to enable this specific source | + +* NOTE: Set only _one_ set of CEF variables for the entire SC4S deployment, regardless of how +many ports are in use by this CEF source (or any others). See the "Common Event Format" source +documentation for more information. 
+ +### Verification + +An active site will generate frequent events use the following search to check for new events + +Verify timestamp, and host values match as expected + +``` +index= (sourcetype=cef source="carbonblack:protection:cef") +``` + + ## Product - vSphere - ESX NSX (Controller, Manager, Edge) diff --git a/package/etc/conf.d/filters/cisco/cisco_syslog.conf b/package/etc/conf.d/filters/cisco/cisco_syslog.conf index af04a54..61da774 100644 --- a/package/etc/conf.d/filters/cisco/cisco_syslog.conf +++ b/package/etc/conf.d/filters/cisco/cisco_syslog.conf @@ -45,22 +45,19 @@ parser cisco-parser-ex{ } elif { # Cisco Nexus Switch filter { - message('^<(?\d*)>:? ?20\d\d (\w\w\w \d+ \d\d:\d\d:\d\d(?:.\d{3,3})?) ([^ ]+) ((%[^\: ]+)\:? ?.*)' + message('^<(?\d*)>:? ?20\d\d (\w\w\w \d+ \d\d:\d\d:\d\d(?:.\d{3,3})?)(?: [A-Z]{3}:)? ([^ ]+)? ?((%[^\: ]+)\:? ?.*)' flags(store-matches)); }; rewrite { set( "${3}", - value("HOST") + value("HOST") + condition(not match('^\d+$', value('3')) and match('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$|^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$' value('3'))) ); set( "${4}", value("MESSAGE") ); - set( - "${5}", - value("PROGRAM") - ); }; parser { date-parser-nofilter(format( '%b %d %H:%M:%S.%f', diff --git a/package/etc/context_templates/splunk_metadata.csv.example b/package/etc/context_templates/splunk_metadata.csv.example index 3bc7e0e..eaf6a00 100644 --- a/package/etc/context_templates/splunk_metadata.csv.example +++ b/package/etc/context_templates/splunk_metadata.csv.example @@ -2,8 +2,8 @@ bluecoat_proxy,index,netproxy brocade_syslog,index,netops ArcSight_ArcSight,index,main ArcSight_ArcSight,source,ArcSight:ArcSight -Carbon Black_Protection,sourcetype,carbonblack:protection:cef -Carbon Black_Protection,index,cb:cef +Carbon Black_Protection,source,carbonblack:protection:cef +Carbon 
Black_Protection,index,epintel Cyber-Ark_Vault,index,netauth Cyber-Ark_Vault,sourcetype,cyberark:epv:cef CyberArk_PTA,index,main diff --git a/package/sbin/entrypoint.sh b/package/sbin/entrypoint.sh index 92e68a9..73657c5 100755 --- a/package/sbin/entrypoint.sh +++ b/package/sbin/entrypoint.sh @@ -108,7 +108,7 @@ echo sc4s version=$(cat /VERSION) >/opt/syslog-ng/var/log/syslog-ng.out # Use gomplate to pick up default listening ports for health check echo starting goss gomplate --file /goss.yaml.tmpl --out /goss.yaml -goss -g /goss.yaml serve --format json >/dev/null & +goss -g /goss.yaml serve --format json >/dev/null 2>/dev/null & echo syslog-ng starting /opt/syslog-ng/bin/persist-tool add /opt/syslog-ng/etc/reset_persist -o /opt/syslog-ng/var diff --git a/tests/test_cisco_ios.py b/tests/test_cisco_ios.py index 8d3a2bc..98167d5 100644 --- a/tests/test_cisco_ios.py +++ b/tests/test_cisco_ios.py @@ -44,6 +44,7 @@ "{{ mark }}22191: {{ host }}: 022546: {{ bsd }}.{{ millisec }} CDT: %PARSER-5-CFGLOG_LOGGEDCMD: User:dfa_service_admin logged command:!exec: enable", "{{ mark }}{{ host }}: {{ year }} {{ bsd }} CDT: %MODULE-2-MOD_SOMEPORTS_FAILED: Module 13 (Serial number: JAF12345678) reported failure on ports Eth13/17-20 (Ethernet) due to hardware not accessible in device DEV_CLP_FWD(device error 0xca804200)", "{{ mark }}{{ host }}: {{ year }} {{ bsd }}.{{ millisec }} CDT: %MODULE-2-MOD_SOMEPORTS_FAILED: Module 13 (Serial number: JAF12345678) reported failure on ports Eth13/17-20 (Ethernet) due to hardware not accessible in device DEV_CLP_FWD(device error 0xca804200)", + "{{ mark }}: 2020 {{ bsd }} EDT: %L2FM-4-L2FM_MAC_MOVE: Mac e4c7.2266.f741 in vlan 1159 has moved from 100.16.4513 to {{ host }}" ] testdata_badtime = [ "{{ mark }}{{ seq }}: {{ host }}: 6340004: *{{ bsd }}: %SEC-6-IPACCESSLOGP: list INET-BLOCK permitted tcp 192.168.20.252(55244) -> 10.54.3.178(44818), 1 packet", From 0d6fea379e48217f4a2b8200cb74cabfe6c6a8b9 Mon Sep 17 00:00:00 2001 From: Ryan Faircloth 
<35384120+rfaircloth-splunk@users.noreply.github.com> Date: Tue, 4 Aug 2020 11:10:15 -0400 Subject: [PATCH 4/5] Release PR for v1.26.1 (#603) * [filterchange] Cisco Nexus (#594) Adjust airOS and Nexsus to address event with no HOST but with TZ indicator * [doc] Palo filter does not support UDP due to limitations of message size (#591) * [filteradd] Carbon Black Protection CEF format (#590) * [fix] noise in SC4S logs from goss (#595) * Troubleshooting/problem >solution (#593) (#596) * Troubleshooting/problem >solution (#593) * Troubleshooting guide format problem solution * Minor chnages * minor changes * Revert "Troubleshooting/problem >solution (#593)" (#597) This reverts commit 20a11167d17ba929bff588c758dd76dd404b0255. Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> * [filter] improve f5 filter logic (#601) * [filter] Additional severity levels for f5 * [filter] improve f5 matching * Fix indents * Fix indents Co-authored-by: mbonsack * [doc] clarify SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE (#602) * [doc] clarify SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE * Clarification edit * Clarification edit * Clarify edit 2 * Clarify edit 2 * Clarify edit 3 * Clarify edit 3 * Clarify edit 4 * Clarify edit 4 Co-authored-by: mbonsack Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> Co-authored-by: mbonsack --- docs/configuration.md | 2 +- package/etc/conf.d/filters/f5/bigip.conf.tmpl | 3 ++- package/etc/conf.d/log_paths/lp-f5_bigip.conf.tmpl | 1 + tests/docker-compose.yml | 3 +-- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 4029d54..6c6cf86 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -24,7 +24,7 @@ syslog. | SC4S_DEST_SPLUNK_HEC_GLOBAL | yes | Send events to Splunk using HEC. This applies _only_ to the primary HEC destination. 
| | SC4S_DEST_SPLUNK_HEC_CIPHER_SUITE | comma separated list | Open SSL cipher suite list | | SC4S_DEST_SPLUNK_HEC_SSL_VERSION | comma separated list | Open SSL version list | -| SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE | path | Custom trusted cert file | +| SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE | _container_ path `/opt/syslog-ng/tls/server.pem` | Custom trusted cert file, specified as a full path in the _container_ filesystem: `/opt/syslog-ng/tls/`
Ensure that the container TLS directory `/opt/syslog-ng/tls` is available locally via container mount in the `docker-compose.yml` or systemd unit file, and that you place the CA file in the locally-mounted directory. | | SC4S_DEST_SPLUNK_HEC_TLS_VERIFY | yes(default) or no | verify HTTP(s) certificate | | SC4S_DEST_SPLUNK_HEC_WORKERS | numeric | Number of destination workers (default: 10 threads). This should rarely need to be changed; consult sc4s community for advice on appropriate setting in extreme high- or low-volume environments. | | SC4S_DEST_SPLUNK_INDEXED_FIELDS | facility,
severity,
container,
loghost,
destport,
fromhostip,
proto

none | List of sc4s indexed fields that will be included with each event in Splunk (default is the entire list except "none"). Two other indexed fields, `sc4s_vendor_product` and `sc4s_syslog_format`, will also appear along with the fields selected via the list and cannot be turned on or off individually. If no indexed fields are desired (including the two internal ones), set the value to the single value of "none". When setting this variable, separate multiple entries with commas and do not include extra spaces.

This list maps to the following indexed fields that will appear in all Splunk events:
facility: sc4s_syslog_facility
severity: sc4s_syslog_severity
container: sc4s_container
loghost: sc4s_loghost
dport: sc4s_destport
fromhostip: sc4s_fromhostip
proto: sc4s_proto diff --git a/package/etc/conf.d/filters/f5/bigip.conf.tmpl b/package/etc/conf.d/filters/f5/bigip.conf.tmpl index 8b25e6f..7215a90 100644 --- a/package/etc/conf.d/filters/f5/bigip.conf.tmpl +++ b/package/etc/conf.d/filters/f5/bigip.conf.tmpl @@ -3,6 +3,7 @@ filter f_f5_bigip { or match('^\[F5@12276' value("SDATA")) or program("tmsh") or program("mcpd") + or program("mprov") or program("apmd") or program("tmm\d?") or program('^f5_irule=') @@ -16,7 +17,7 @@ filter f_f5_bigip_irule { filter f_f5_bigip_message { message( - '^(?i)(<\d+> ?[[:alpha:]]+\s{1,2}\d{1,2} \d\d:\d\d:\d\d )(?:([^\/]+)(?:\/))?([^ ]+) +(?:notice|err|error|warning|info) +?(.*)' + '^(?i)(<\d+> ?[[:alpha:]]+\s{1,2}\d{1,2} \d\d:\d\d:\d\d )(?:([^\/ ]+)(?:\/))?([^ ]+) +(?:alert|debug|notice|err|error|warning|info|emerg) +?(.*)' flags(store-matches) ); }; diff --git a/package/etc/conf.d/log_paths/lp-f5_bigip.conf.tmpl b/package/etc/conf.d/log_paths/lp-f5_bigip.conf.tmpl index 308d60d..a2b4d64 100644 --- a/package/etc/conf.d/log_paths/lp-f5_bigip.conf.tmpl +++ b/package/etc/conf.d/log_paths/lp-f5_bigip.conf.tmpl @@ -24,6 +24,7 @@ log { filter{ program("tmsh") or program("mcpd") + or program("mprov") or program("apmd") or program("tmm\d?") }; diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 88a462f..fe7d932 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -13,8 +13,7 @@ services: build: context: ../package hostname: sc4s - #When this is enabled test_common will fail - # command: -det + command: -det ports: - "514" - "601" From 3fa55d0a7d03c9d0ffdd0877fc4d9005f48be26d Mon Sep 17 00:00:00 2001 From: Ryan Faircloth <35384120+rfaircloth-splunk@users.noreply.github.com> Date: Mon, 10 Aug 2020 12:31:32 -0400 Subject: [PATCH 5/5] Merge to master (#627) * [filterchange] Cisco Nexus (#594) Adjust airOS and Nexsus to address event with no HOST but with TZ indicator * [doc] Palo filter does not support UDP due to limitations of message size (#591) * 
[filteradd] Carbon Black Protection CEF format (#590) * [fix] noise in SC4S logs from goss (#595) * Troubleshooting/problem >solution (#593) (#596) * Troubleshooting/problem >solution (#593) * Troubleshooting guide format problem solution * Minor chnages * minor changes * Revert "Troubleshooting/problem >solution (#593)" (#597) This reverts commit 20a11167d17ba929bff588c758dd76dd404b0255. Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> * [filter] improve f5 filter logic (#601) * [filter] Additional severity levels for f5 * [filter] improve f5 matching * Fix indents * Fix indents Co-authored-by: mbonsack * [doc] clarify SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE (#602) * [doc] clarify SC4S_DEST_SPLUNK_HEC_TLS_CA_FILE * Clarification edit * Clarification edit * Clarify edit 2 * Clarify edit 2 * Clarify edit 3 * Clarify edit 3 * Clarify edit 4 * Clarify edit 4 Co-authored-by: mbonsack * [doc] Update troubleshooting guide (#600) * Update troubleshooting guide Co-authored-by: mbonsack * Unit file updates/home page update (#605) * Update unit files for proper restart behavior * Update pathnames for `sleep` and `conntrack` * Update SC4S docs home page to indicate fully supported status * Update `entrypoint.sh` HEC check (#607) * Update `entrypoint.sh` to honor value of `SC4S_DEST_SPLUNK_HEC_TLS_VERIFY` during HEC connectivity check * [filtermod] Update Citrix to handle malformed AAA (#609) * [filtermod] Checkpoint doesn't use the correct whitespace (#608) * [filtermod] CEF time stamp (#612) False error caused when itterating through multiple time stamps add support for fractional seconds * [fix] Incorrect host resolution (#610) * [fix] Incorrect host resolution When the log source includes an IP as host resolve using connection IP rather than field IP. 
When the host resolves to a single name rather than FQDN do not set the host value as this can't be trusted * Update test_common.py * Update fix_dns.conf * [filtermod] Resolve time issue in acs (#613) * [filtermod] Resolve time issue in acs ACS more often than not does not send a better time than BSD time field so do not attempt to use it * Update lp-cisco_acs.conf.tmpl * [filtermod] cisco date parser issues (#611) * [filtermod] cisco date parser issues False error reported when cisco device sends uptime device reported time errors will now use the indexed field cisco_time_error Add micro seconds format without year * Update cisco_syslog.conf Use cisco time even when wrong * [filtermod] Fix issue with cp nested syslog (#614) * [fix] lost brace in merge (#619) * [filtermod] cisco date parser issues False error reported when cisco device sends uptime device reported time errors will now use the indexed field cisco_time_error Add micro seconds format without year * Update cisco_syslog.conf Use cisco time even when wrong * Update cisco_syslog.conf * RSA SecurID timestamps (#616) * Fix time/date parsing in RSA SecurID * Nit: Should fix all references to this source as `SecurID` (with no "e") * [fix] revert acs changes (#620) Revert * [fix] lookup host by sourceip (#621) * [fix] lookup host by sourceip * Update test_common.py * Update test_cisco_acs.py (#622) Improve test to avoid flaky ness * [fix] Fixes #604 (#615) Do not check client cert in TLS connections as we do not support a reasonable means of authorization checking * [filtermod] Add program 'iControlPortal.cgi' to f5 list Fixes #568 (#617) * [doc] prevent conntrack from halting start (#618) * CEF: Imperva WAF timestamp parsing (#624) * CEF: Imperva WAF timestamp parsing fix * Fixed Cisco WSA python test (#623) * Fixed Cisco WSA python test * Update local container guidance (#625) * Add item to remove pull from unit file in local container guidance * Clarify use of internal (syslog-ng based) load balancing vs. 
external LB * [feature] Alpha Documentation and changes to support microk8s This is alpha level support and rough docs for k8s as a runtime * [filtermod] Resolve f5 UTF issue with glob (#626) Co-authored-by: nandinivij <61885842+nandinivij@users.noreply.github.com> Co-authored-by: mbonsack --- deploy/k8s-microk8s/sc4s-ds.yaml | 68 +++++ deploy/k8s-microk8s/sc4s-infra.yaml | 267 ++++++++++++++++++ docs/gettingstarted/docker-systemd-general.md | 1 - docs/gettingstarted/index.md | 21 +- docs/gettingstarted/k8s-microk8s.md | 77 +++++ docs/gettingstarted/podman-systemd-general.md | 5 +- docs/index.md | 16 +- .../troubleshoot_SC4S_server.md} | 144 +++++----- .../troubleshooting/troubleshoot_resources.md | 81 ++++++ mkdocs.yml | 7 +- .../conf.d/conflib/_common/syslog_format.conf | 4 + .../etc/conf.d/conflib/_splunk/fix_dns.conf | 13 +- .../etc/conf.d/filters/checkpoint/splunk.conf | 62 ++-- .../conf.d/filters/cisco/cisco_syslog.conf | 25 +- .../filters/citrix/netscalersdx.conf.tmpl | 11 + package/etc/conf.d/filters/f5/bigip.conf.tmpl | 3 +- .../conf.d/log_paths/lp-cisco_acs.conf.tmpl | 2 +- .../lp-common_event_format.conf.tmpl | 18 +- .../log_paths/lp-dell_rsa_secureid.conf.tmpl | 2 +- package/etc/conf.d/sources/rfc5687.conf.tmpl | 1 - package/etc/go_templates/source_network.t | 27 +- package/sbin/entrypoint.sh | 50 ++-- tests/test_cisco_acs.py | 34 +-- tests/test_cisco_wsa.py | 42 +-- tests/test_citrix_netscaler.py | 82 +++++- tests/test_common.py | 79 ------ tests/test_dell_rsa_secureid.py | 15 +- 27 files changed, 865 insertions(+), 292 deletions(-) create mode 100644 deploy/k8s-microk8s/sc4s-ds.yaml create mode 100644 deploy/k8s-microk8s/sc4s-infra.yaml create mode 100644 docs/gettingstarted/k8s-microk8s.md rename docs/{troubleshooting.md => troubleshooting/troubleshoot_SC4S_server.md} (58%) create mode 100644 docs/troubleshooting/troubleshoot_resources.md diff --git a/deploy/k8s-microk8s/sc4s-ds.yaml b/deploy/k8s-microk8s/sc4s-ds.yaml new file mode 100644 index 
0000000..dd8c05c --- /dev/null +++ b/deploy/k8s-microk8s/sc4s-ds.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: splunk-sc4s + labels: + app: sc4s +spec: + selector: + matchLabels: + name: splunk-sc4s + template: + metadata: + labels: + name: splunk-sc4s + spec: + tolerations: + # this toleration is to have the daemonset runnable on master nodes + # remove it if your masters can't run pods + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: sc4s + image: localhost:32000/scs:latest + ports: + - containerPort: 514 + envFrom: + - configMapRef: + name: sc4s-env-file + env: + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-s1-standalone-secrets + key: hec_token + - name: SC4S_SNMP_TRAP_COLLECT + value: "no" + - name: SC4S_CONTAINER_HOST + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: SC4S_RUNTIME_ENV + value: "k8s" + livenessProbe: + httpGet: + path: /healthz + port: 8080 + # initialDelaySeconds: 15 + periodSeconds: 3 + startupProbe: + httpGet: + path: /healthz + port: 8080 + failureThreshold: 30 + periodSeconds: 10 + volumeMounts: + - name: syslog-var + mountPath: "/opt/syslog-ng/var" + - name: sc4s-context + mountPath: /opt/syslog-ng/etc/conf.d/configmap/context + terminationGracePeriodSeconds: 600 + volumes: + - name: syslog-var + persistentVolumeClaim: + claimName: splunk-sc4s-pvc + - name: sc4s-context + configMap: + name: sc4s-context-config diff --git a/deploy/k8s-microk8s/sc4s-infra.yaml b/deploy/k8s-microk8s/sc4s-infra.yaml new file mode 100644 index 0000000..a06203c --- /dev/null +++ b/deploy/k8s-microk8s/sc4s-infra.yaml @@ -0,0 +1,267 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: sc4s-env-file +data: + SPLUNK_HEC_URL: https://splunk-s1-standalone-headless:8088 + SC4S_DEST_SPLUNK_HEC_TLS_VERIFY: "yes" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sc4s-context-config +data: + # example of a simple property defined using --from-literal + 
compliance_meta_by_source.conf: |- + filter f_test_test { + # host("something-*" type(glob)) or + # netmask(169.254.100.0/24) + host("cannot_ever_happen") + }; + compliance_meta_by_source.csv: |- + f_test_test,.splunk.index,"will_never_happen_index" + f_test_test,fields.compliance,"pci" + host.csv: |- + 169.254.0.2,HOST,foo.example + splunk_metadata.csv: |- + bluecoat_proxy,index,netproxy + brocade_syslog,index,netops + ArcSight_ArcSight,index,main + Cyber-Ark_Vault,index,netauth + CyberArk_PTA,index,main + Incapsula_SIEMintegration,index,netwaf + Microsoft_Microsoft Windows,index,oswinsec + Microsoft_System or Application Event,index,oswin + checkpoint_splunk,index,netops + checkpoint_splunk_dlp,index,netdlp + checkpoint_splunk_email,index,email + checkpoint_splunk_firewall,index,netfw + checkpoint_splunk_ids,index,netids + checkpoint_splunk_os,index,netops + checkpoint_splunk_sessions,index,netops + checkpoint_splunk_web,index,netproxy + checkpoint_splunk,index,netops + checkpoint_splunk,index,netops + cisco_apic_acl,index,netfw + cisco_apic_events,index,netops + cisco_acs,index,netauth + cisco_asa,index,netfw + cisco_ftd,index,netfw + cisco_ios,index,netops + cisco_ise,index,netauth + cisco_meraki,index,netfw + cisco_nx_os,index,netops + cisco_ucm,index,main + cisco_wsa,index,netproxy + dell_rsa_secureid,index,netauth + citrix_netscaler,index,netfw + local_example,index,main + forcepoint_webprotect,index,netproxy + f5_bigip,index,netops + f5_bigip_access_json,index,netops + f5_bigip_irule,index,netops + f5_bigip_asm,index,netwaf + f5_bigip_nix,index,netops + fortinet_fortios_event,index,netops + fortinet_fortios_log,index,netops + fortinet_fortios_traffic,index,netfw + fortinet_fortios_utm,index,netids + fortinet_fortiweb_attack,index,netids + fortinet_fortiweb_event,index,netops + fortinet_fortiweb_log,index,netops + fortinet_fortiweb_traffic,index,netfw + infoblox_dns,index,netdns + infoblox_dhcp,index,netipam + infoblox_threat,index,netids + 
juniper_idp,index,netids + juniper_structured,index,netops + juniper_idp_structured,index,netids + juniper_junos_fw_structured,index,netfw + juniper_junos_ids_structured,index,netids + juniper_junos_utm_structured,index,netfw + juniper_junos_aamw_structured,index,netfw + juniper_junos_secintel_structured,index,netfw + juniper_junos_fw,index,netfw + juniper_junos_ids,index,netids + juniper_junos_utm,index,netfw + juniper_netscreen,index,netfw + juniper_legacy,index,netops + mcafee_epo,index,epav + nix_syslog,index,osnix + pan_traffic,index,netfw + pan_threat,index,netproxy + pan_system,index,netops + pan_config,index,netops + pan_hipmatch,index,main + pan_correlation,index,main + pan_userid,index,netauth + pan_unknown,index,netops + pfsense,index,netops + pfsense_filterlog,index,netfw + proofpoint_pps_filter,index,email + proofpoint_pps_sendmail,index,email + sc4s_events,index,main + sc4s_fallback,index,main + sc4s_metrics,index,em_metrics + symantec_ep,index,epav + symantec_brightmail,index,email + ubiquiti_unifi,index,netops + ubiquiti_unifi_fw,index,netfw + ubiquiti_unifi_link,index,netops + ubiquiti_unifi_sudo,index,netops + ubiquiti_unifi_switch,index,netops + ubiquiti_unifi_threat,index,netids + ubiquiti_unifi_wireless,index,netops + vmware_esx,index,main + vmware_horizon,index,main + vmware_nsx,index,main + vmware_vcenter,index,main + zscaler_alerts,index,netops + zscaler_dns,index,netdns + zscaler_fw,index,netfw + zscaler_web,index,netproxy + zscaler_zia_audit,index,netops + zscaler_zia_sandbox,index,main + zscaler_lss,index,netproxy + vendor_product_by_source.conf: |- + filter f_test_test { + host("testvp-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_null_queue { + netmask(169.254.100.0/24) + }; + filter f_brocade_syslog { + host("test_brocade-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_citrix_netscaler { + host("test_ctitrixns-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_dell_rsa_secureid { + 
host("test_rsasecureid*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_juniper_netscreen { + host("jnpns-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_cisco_meraki { + host("testcm-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_cisco_wsa{ + host("cisco_wsa" type(glob)) + }; + filter f_cisco_wsa11_7{ + host("cisco_wsa11_7" type(glob)) + }; + filter f_cisco_nx_os { + host("csconx-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_f5_bigip { + host("test_f5-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_infoblox { + host("vib-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_pfsense { + host("pfsense-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_proofpoint_pps_filter { + host("pps-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_proofpoint_pps_sendmail { + host("pps-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_schneider_apc { + host("test_apc-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_ubiquiti_unifi_fw { + host("usg-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_tzfixhst { + host("tzfhst-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + filter f_tzfixny { + host("tzfny-*" type(glob)) + #or netmask(xxx.xxx.xxx.xxx/xx) + }; + vendor_product_by_source.csv: |- + f_test_test,sc4s_vendor_product,"test_test" + f_brocade_syslog,sc4s_vendor_product,"brocade_syslog" + f_null_queue,sc4s_vendor_product,"null_queue" + f_cisco_meraki,sc4s_vendor_product,"cisco_meraki" + f_cisco_wsa,sc4s_vendor_product,"cisco_wsa" + f_cisco_wsa11_7,sc4s_vendor_product,"cisco_wsa11_7" + f_citrix_netscaler,sc4s_vendor_product,"citrix_netscaler" + f_dell_rsa_secureid,sc4s_vendor_product,"dell_rsa_secureid" + f_f5_bigip,sc4s_vendor_product,"f5_bigip" + f_infoblox,sc4s_vendor_product,"infoblox" + f_juniper_netscreen,sc4s_vendor_product,"juniper_netscreen" + f_cisco_nx_os,sc4s_vendor_product,"cisco_nx_os" + 
f_pfsense,sc4s_vendor_product,"pfsense" + f_proofpoint_pps_sendmail,sc4s_vendor_product,"proofpoint_pps_sendmail" + f_proofpoint_pps_filter,sc4s_vendor_product,"proofpoint_pps_filter" + f_schneider_apc,sc4s_vendor_product,"schneider_apc" + f_ubiquiti_unifi_fw,sc4s_vendor_product,"ubiquiti_unifi_fw" + f_tzfixhst,sc4s_time_zone,"Pacific/Honolulu" + f_tzfixny,sc4s_time_zone,"America/New_York" + +--- + +--- +apiVersion: v1 +kind: Service +metadata: + name: sc4s-ext-tcp + annotations: + metallb.universe.tf/allow-shared-ip: sc4s +spec: + ports: + - port: 514 + targetPort: 514 + protocol: TCP + selector: + app: sc4s + type: LoadBalancer + externalTrafficPolicy: Local +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: splunk-sc4s-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500M +--- +apiVersion: v1 +kind: Service +metadata: + name: sc4s-ext-udp + annotations: + metallb.universe.tf/allow-shared-ip: sc4s +spec: + ports: + - port: 514 + targetPort: 514 + protocol: UDP + selector: + app: sc4s + type: LoadBalancer + externalTrafficPolicy: Local +--- + diff --git a/docs/gettingstarted/docker-systemd-general.md b/docs/gettingstarted/docker-systemd-general.md index 60ece81..fe83a6f 100644 --- a/docs/gettingstarted/docker-systemd-general.md +++ b/docs/gettingstarted/docker-systemd-general.md @@ -50,7 +50,6 @@ Environment="SC4S_LOCAL_CONFIG_MOUNT=-v /opt/sc4s/local:/opt/syslog-ng/etc/conf. 
# Environment="SC4S_TLS_DIR=-v /opt/sc4s/tls:/opt/syslog-ng/tls:z" TimeoutStartSec=0 -Restart=always ExecStartPre=/usr/bin/docker pull $SC4S_IMAGE ExecStartPre=/usr/bin/bash -c "/usr/bin/systemctl set-environment SC4SHOST=$(hostname -s)" diff --git a/docs/gettingstarted/index.md b/docs/gettingstarted/index.md index 7eb766c..e9dc8ec 100644 --- a/docs/gettingstarted/index.md +++ b/docs/gettingstarted/index.md @@ -63,8 +63,13 @@ Install the following: #### Configure the Splunk HTTP Event Collector - Set up the Splunk HTTP Event Collector with the HEC endpoints behind a load balancer (VIP) configured for https round robin *WITHOUT* sticky -session. Alternatively, a list of HEC endpoint URLs can be configured in SC4S (native Syslog-ng load balancing) if no load balancer is in place. In either case, it is -recommended that SC4S traffic be sent to HEC endpoints configured directly on the indexers rather than an intermediate tier of HWFs. Deployments with 10 or fewer Indexers and where HEC is used exclusively for syslog, the recommendation is to use the native load balancing. In all other scenarios the recommendation is to use an external load balacer. If utilizing the native load balancing, be sure to update the configuration when the number and/or names of the indexers change. +session. Alternatively, a list of HEC endpoint URLs can be configured in SC4S (native syslog-ng load balancing) if no load balancer is in +place. In most scenarios the recommendation is to use an external load balancer, as that makes longer term +maintenance simpler by eliminating the need to manually keep the list of HEC URLs specified in sc4s current. However, if a LB is not +available, native load balancing can be used with 10 or fewer Indexers where HEC is used exclusively for syslog. + + In either case, it is _strongly_ recommended that SC4S traffic be sent to HEC endpoints configured directly on the indexers rather than +an intermediate tier of HWFs. 
- Create a HEC token that will be used by SC4S and ensure the token has access to place events in main, em_metrics, and all indexes used as event destinations. @@ -83,7 +88,8 @@ Splunk type. #### Prerequisites * Linux host with Docker (CE 19.x or greater with Docker Swarm) or Podman enabled, depending on runtime choice (below). -* A network load balancer (NLB) configured for round robin. Note: Special consideration may be required when more advanced products are used. The optimal configuration of the load balancer will round robin each http POST request (not each connection). +* A network load balancer (NLB) configured for round robin. Note: Special consideration may be required when more advanced products are used. +The optimal configuration of the load balancer will round robin each http POST request (not each connection). * The host linux OS receive buffer size should be tuned to match the sc4s default to avoid dropping events (packets) at the network level. The default receive buffer for sc4s is set to 16 MB for UDP traffic, which should be OK for most environments. To set the host OS kernel to match this, edit `/etc/sysctl.conf` using the following whole-byte values corresponding to 16 MB: @@ -135,7 +141,8 @@ net.ipv4.ip_forward=1 Follow these instructions to "stage" SC4S by downloading the container so that it can be loaded "out of band" on a host machine, such as an airgapped system, without internet connectivity. -* Download container image "oci_container.tgz" from our [Github Page](https://github.com/splunk/splunk-connect-for-syslog/releases). The following example downloads v1.12; replace the URL with the latest release or pre-release version as desired. +* Download container image "oci_container.tgz" from our [Github Page](https://github.com/splunk/splunk-connect-for-syslog/releases). +The following example downloads v1.12; replace the URL with the latest release or pre-release version as desired. 
``` sudo wget https://github.com/splunk/splunk-connect-for-syslog/releases/download/v1.12.0/oci_container.tar.gz @@ -167,4 +174,8 @@ attempt to obtain the container image via the internet. ``` Environment="SC4S_IMAGE=sc4slocal:latest" ``` - +* Remove the entry +``` +ExecStartPre=/usr/bin/docker pull $SC4S_IMAGE +``` +from the relevant unit file when using systemd, as an external connection to pull the container is no longer needed (or available). diff --git a/docs/gettingstarted/k8s-microk8s.md b/docs/gettingstarted/k8s-microk8s.md new file mode 100644 index 0000000..2c8091d --- /dev/null +++ b/docs/gettingstarted/k8s-microk8s.md @@ -0,0 +1,77 @@ + +# Install MicroK8s - ALPHA + +SUPPORT NOTICE: DEPLOYMENT VIA K8S is ALPHA and is not officially supported for production + +The SC4S deployment model with Microk8s uses specific features of this distribution of k8s. +While this may be reproducible with other distributions, such an undertaking requires more advanced +awareness and responsibility for the administrator. + +* (metalLB) ensure source IP is preserved +* Bring any operating system (windows/centos/rhel/ubuntu/debian) + +This configuration requires at least 2 IP addresses, one for the host and one for the internal load balancer. +We suggest allocation of 3 IP addresses for the host and 5-10 addresses for later use. + +# FAQ + +Question: Why is this "load balancer" ok but others are not? +Answer: While we are using a load balancer with one instance per host, the traffic is restricted +to the entry node and one instance of sc4s will run per node. This limits the function of MetalLB to +the same function as a Cluster Manager.
+ +```bash +#we need to have a normal install of kubectl because of operator scripts +sudo snap install kubectl --classic +# Basic setup of k8s +sudo snap install microk8s --classic --channel=1.18/stable +sudo usermod -a -G microk8s $USER +sudo chown -f -R $USER ~/.kube + +su - $USER +microk8s status --wait-ready +microk8s enable dns metallb rbac storage +microk8s status --wait-ready +mkdir ~/.kube +#tell the default install of kubectl how to talk to our cluster +microk8s.config > $HOME/.kube/config +# +``` + +# Install SC4S + +```bash +git clone https://github.com/splunk/splunk-connect-for-syslog.git +cd splunk-connect-for-syslog +kubectl create ns sc4s +kubectl apply -n sc4s -f deploy/k8s-microk8s/sc4s-infra.yaml +# Important: modify the following command to use the correct token +echo -n 'A8AE530F-73C6-E990-704A-963E3623F4D0' > hec_token.txt +kubectl create -n sc4s secret generic sc4s-secrets --from-file=hec_token=./hec_token.txt +rm hec_token.txt +# Edit the values for SPLUNK_HEC_URL and SC4S_DEST_SPLUNK_HEC_TLS_VERIFY +kubectl edit -n sc4s configmap sc4s-env-file +# Deploy sc4s +kubectl apply -n sc4s -f deploy/k8s-microk8s/sc4s-ds.yaml +# Watch pods use ctrl + c to terminate when running +kubectl get -n sc4s pods -w +# Optional get logs replace with pod name above +kubectl -n sc4s logs splunk-sc4s-22rr6 +``` + +Check Splunk for events + +# Change configuration + +Note: changes to the following config will trigger a restart of the container + +```bash +kubectl edit configmap sc4s-env-file +kubectl edit configmap sc4s-context-config +``` + +# Setup for HA with multiple nodes + +See https://microk8s.io/docs/high-availability + +Note: three identically sized nodes are required for HA \ No newline at end of file diff --git a/docs/gettingstarted/podman-systemd-general.md b/docs/gettingstarted/podman-systemd-general.md index b543f07..4910f45 100644 --- a/docs/gettingstarted/podman-systemd-general.md +++ b/docs/gettingstarted/podman-systemd-general.md @@ -24,7
+24,7 @@ Note that the space on either side of the semicolon in the `ExecStartPost` entry will error out if it is missing. ``` -ExecStartPost=sleep 2 ; conntrack -D -p udp +ExecStartPost=sleep 2 ; conntrack -D -p udp || true ``` This command will delete the old (stale) UDP entries two seconds after the container starts and allow the system to build a new table that @@ -68,7 +68,6 @@ Environment="SC4S_LOCAL_CONFIG_MOUNT=-v /opt/sc4s/local:/opt/syslog-ng/etc/conf. # Environment="SC4S_TLS_DIR=-v /opt/sc4s/tls:/opt/syslog-ng/tls:z" TimeoutStartSec=0 -Restart=always ExecStartPre=/usr/bin/podman pull $SC4S_IMAGE ExecStartPre=/usr/bin/bash -c "/usr/bin/systemctl set-environment SC4SHOST=$(hostname -s)" @@ -81,7 +80,7 @@ ExecStart=/usr/bin/podman run -p 514:514 -p 514:514/udp -p 6514:6514 \ "$SC4S_TLS_DIR" \ --name SC4S \ --rm $SC4S_IMAGE -ExecStartPost=sleep 2 ; conntrack -D -p udp +ExecStartPost=/bin/sleep 2 ; /sbin/conntrack -D -p udp || true Restart=on-success ``` diff --git a/docs/index.md b/docs/index.md index c7c293a..ab0ed84 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,10 +1,10 @@ -# Welcome to Splunk Connect for Syslog +# Welcome to Splunk Connect for Syslog! Splunk Connect for Syslog is an open source packaged solution for -getting data into Splunk using syslog-ng Open Source Edition (Syslog-NG OSE) and the Splunk -HTTP event Collector. +getting data in to Splunk. It is based on the syslog-ng Open Source Edition (Syslog-NG OSE) and transports data to Splunk via the Splunk +HTTP event Collector (HEC) rather than writing events to disk for collection by a Universal Forwarder. -## Project Goals +## Product Goals * Bring a tested configuration and build of syslog-ng OSE to the market that will function consistently regardless of the underlying host's linux distribution * Provide a container with the tested configuration for Docker/K8s that can be more easily deployed than upstream packages directly on a customer OS @@ -14,7 +14,13 @@ HTTP event Collector. 
## Support -Splunk Connect for Syslog is an open source product developed by Splunkers with contributions from the community of partners and customers. This unique product will be enhanced, maintained and supported by the community, led by Splunkers with deep subject matter expertise. The primary reason why Splunk is taking this approach is to push product development closer to those that use and depend upon it. This direct connection will help us all be more successful and move at a rapid pace. +* UPDATE! Splunk Connect for Syslog is now officially supported by Splunk. That said, it is still very much an open-source product and +the notes below outlining community support are still highly relevant. + +Splunk Connect for Syslog is an open source product developed by Splunkers with contributions from the community of partners and customers. +This unique product will be enhanced, maintained and supported by the community, led by Splunkers with deep subject matter expertise. The +primary reason why Splunk is taking this approach is to push product development closer to those that use and depend upon it. This direct +connection will help us all be more successful and move at a rapid pace. Post a question to Splunk Answers using the tag "Splunk Connect For Syslog" diff --git a/docs/troubleshooting.md b/docs/troubleshooting/troubleshoot_SC4S_server.md similarity index 58% rename from docs/troubleshooting.md rename to docs/troubleshooting/troubleshoot_SC4S_server.md index e14b1ae..5946e7b 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting/troubleshoot_SC4S_server.md @@ -1,7 +1,15 @@ -#Troubleshooting - -## Startup - +# SC4S Server Startup and Operational Validation + +The following sections will guide the administrator to the most common solutions to startup and +operational issues with SC4S. In general, if you are just starting out with SC4S and wish to +simply run with the "stock" configuration, startup out of systemd is recommended.
If, on the other +hand, you are in the depths of a custom configuration of SC4S with significant modifications (such +as multiple unique ports for sources, hostname/CIDR block configuration for sources, new log paths, +etc.) then it is best to start SC4S with the container runtime command (`podman` or `docker`) +directly from the command line (below). When you are satisfied with the operation, a transition to +systemd can then be made. + +## systemd Errors During SC4S Startup Most issues that occur with startup and operation of sc4s typically involve syntax errors or duplicate listening ports. If you are running out of systemd, you may see this at startup: @@ -9,7 +17,10 @@ running out of systemd, you may see this at startup: [root@sc4s syslog-ng]# systemctl start sc4s Job for sc4s.service failed because the control process exited with error code. See "systemctl status sc4s.service" and "journalctl -xe" for details. ``` -In other cases, there may be nothing untoward after starting with systemd, but the container is not running at all +Follow the checks below to resolve the issue: + +### Is the SC4S container running? +There may be nothing untoward after starting with systemd, but the container is not running at all after checking with `podman logs SC4S` or `podman ps`. A more informative command than `journalctl -xe` is the following, ``` journalctl -b -u sc4s | tail -100 @@ -17,9 +28,11 @@ journalctl -b -u sc4s | tail -100 which will print the last 100 lines of the system journal in far more detail, which should be sufficient to see the specific failure (syntax or runtime) and guide you in troubleshooting why the container exited unexpectedly. +### Does the SC4S container start (and run) properly outside of the systemd service environment? As an alternative to launching via systemd during the initial installation phase, you may wish to test the container startup outside of the -systemd startup environment. 
The following commmand will launch the container directly from the CLI. This command assumes the local mounted -directories are set up as shown in the "getting started" examples: +systemd startup environment. This alternative should be considered required when undergoing heavy troubleshooting or log path development (e.g. +when `SC4S_DEBUG_CONTAINER` is set to "yes"). The following command will launch the container directly from the CLI. +This command assumes the local mounted directories are set up as shown in the "getting started" examples; adjust for your local requirements: ```bash /usr/bin/podman run -p 514:514 -p 514:514/udp -p 6514:6514 -p 5000-5020:5000-5020 -p 5000-5020:5000-5020/udp \ @@ -33,11 +46,12 @@ directories are set up as shown in the "getting started" examples: If you are using docker, substitute "docker" for "podman" for the container runtime command above. -### Stale Containers (podman) +### Is the container still running (when systemd thinks it's not)? -In rare instances, (especially when starting/stopping often) an SC4S container might not shut down completely when using podman, leaving a -"stale" container behind that is denoted by a very long ID string. You will see this type of output when viewing the journal after a failed -start caused by this condition, or a similar message when the container is run directly from the CLI: +In some instances, (particularly when `SC4S_DEBUG_CONTAINER=yes`) an SC4S container might not shut down completely when starting/stopping +out of systemd, and systemd will attempt to start a new container when one is already running with the `SC4S` name. 
+You will see this type of output when viewing the journal after a failed start caused by this condition, or a similar message when the container +is run directly from the CLI: ``` Jul 15 18:45:20 sra-sc4s-alln01-02 podman[11187]: Error: error creating container storage: the container name "SC4S" is already in use by "894357502b2a7142d097ea3ca1468d1cb4fbc69959a9817a1bbe145a09d37fb9". You have to remove that container... @@ -46,24 +60,18 @@ Jul 15 18:45:20 sra-sc4s-alln01-02 systemd[1]: sc4s.service: Main process exited To rectify this, simply execute ``` -podman rm -f 894357502b2a7142d097ea3ca1468d1cb4fbc69959a9817a1bbe145a09d37fb9 +podman rm -f SC4S ``` -replacing the long string with whatever container ID is shown in your error message. SC4S should then start normally. - -## Verification of TLS Server +SC4S should then start normally. -To verify the correct configuration of the TLS server use the following command. Use `podman` or `docker` and replace the IP, FQDN, -and port as appropriate: - -```bash - run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 -``` +* NOTE: This symptom will recur if `SC4S_DEBUG_CONTAINER` is set to "yes". _Do not_ attempt to use systemd when this variable is set; use the +CLI `podman` or `docker` commands directly to start/stop SC4S. -## Validating HEC/token issues (AKA "No data in Splunk") +## HEC/token connection errors (AKA “No data in Splunk”) SC4S performs basic HEC connectivity and index checks at startup. These indicate general connection issues and indexes that may not be -accesible and/or configured on the Splunk side. To check the container logs which contain the results of these tests, run: +accessible and/or configured on the Splunk side. 
To check the container logs which contain the results of these tests, run: ```bash /usr/bin/ logs SC4S @@ -82,23 +90,23 @@ SC4S_ENV_CHECK_INDEX: Checking main {"text":"Success","code":0} Note the specifics of the indexes that are not configured correctly, and rectify in the Splunk configuration. If this is not addressed properly, you may see output similar to the below when data flows into sc4s: - ``` Mar 16 19:00:06 b817af4e89da syslog-ng[1]: Server returned with a 4XX (client errors) status code, which means we are not authorized or the URL is not found.; url='https://splunk-instance.com:8088/services/collector/event', status_code='400', driver='d_hec#0', location='/opt/syslog-ng/etc/conf.d/destinations/splunk_hec.conf:2:5' Mar 16 19:00:06 b817af4e89da syslog-ng[1]: Server disconnected while preparing messages for sending, trying again; driver='d_hec#0', location='/opt/syslog-ng/etc/conf.d/destinations/splunk_hec.conf:2:5', worker_index='4', time_reopen='10', batch_size='1000' ``` - This is an indication that the standard `d_hec` destination in syslog-ng (which is the route to Splunk) is being rejected by the HEC endpoint. A `400` error (not 404) is normally caused by an index that has not been created on the Splunk side. This can present a serious problem, as just _one_ bad index will "taint" the entire batch (in this case, 1000 events) and prevent _any_ of them from being sent to Splunk. _It is -imperative that the container logs be free of these kinds of errors in production._ +imperative that the container logs be free of these kinds of errors in production._ You can use the alternate HEC debug destination (below) +to help debug this condition by sending direct "curl" commands to the HEC endpoint outside of the SC4S setting. 
-## Enabling the Alternate Debug Destination +### Enabling the Alternate HEC Debug Destination -To help debug why the `400` errors are ocurring, it is helpful to enable an alternate destination for syslog traffic that will write +To help debug why these `4xx` errors are occurring, it is helpful to enable an alternate destination for syslog traffic that will write the contents of the full JSON payload that is intended to be sent to Splunk via HEC. This destination will contain each event, repackaged -as a `curl` command that can be run directly on the command line to see what the response from the HEC endpoint is. To do this, set -`SC4S_DEST_GLOBAL_ALTERNATES=d_hec_debug` in the `env_file` and restart sc4s. When set, all data destined for Splunk will also be written to +as a `curl` command that can be run directly on the command line to see what the response from the HEC endpoint is. + +To do this, set `SC4S_DEST_GLOBAL_ALTERNATES=d_hec_debug` in the `env_file` and restart sc4s. When set, all data destined for Splunk will also be written to `/opt/sc4s/archive/debug`, and will be further categorized in subdirectories by sourcetype. Here are the things to check: * In `/opt/sc4s/archive/debug`, you will see directories for each sourcetype that sc4s has collected. If you recognize any that you @@ -109,53 +117,63 @@ cause for almost _all_ `400` errors. curl -k -u "sc4s HEC debug:a778f63a-5dff-4e3c-a72c-a03183659e94" "https://splunk.smg.aws:8088/services/collector/event" -d '{"time":"1584556114.271","sourcetype":"sc4s:events","source":"SC4S:s_internal","index":"main","host":"e3563b0ea5d8","fields":{"sc4s_syslog_severity":"notice","sc4s_syslog_facility":"syslog","sc4s_loghost":"e3563b0ea5d8","sc4s_fromhostip":"127.0.0.1"},"event":"syslog-ng starting up; version='3.28.1'"}' ``` * These commands, with minimal modifications (e.g. 
multiple URLs specified or elements that needs shell escapes) can be run directly on the -command line to determine what, exactly, the HEC endpoint is returning. This can be used to refine th index or other parameter to correct the +command line to determine what, exactly, the HEC endpoint is returning. This can be used to refine the index or other parameter to correct the problem. -## Obtaining "On-the-wire" Raw Events +## SC4S Local Disk Resource Considerations +* Check the HEC connection to Splunk. If the connection is down for a long period of time, the local disk buffer used for backup will exhaust local +disk resources. The size of the local disk buffer is configured in the env_file: [Disk buffer configuration](https://splunk-connect-for-syslog.readthedocs.io/en/master/configuration/#disk-buffer-variables) -In almost all cases during development or troubleshooting, you will need to obtain samples of the messages exactly as they are received by -SC4S. These "raw" events contain the full syslog message (including the `` preamble) and differs from those that appear in Splunk after -processing by sc4s and/or Splunk. This is the only way to determine if SC4S parsers and filters are operating correctly, as raw messages are -needed for "playback" when testing. In addition, the community supporting SC4S will always first ask for raw samples (kind of like the way -Splunk support always asks for "diags") before any development or troubleshooting exercise. +* Check the env_file to see if `SC4S_DEST_GLOBAL_ALTERNATES` is set to `d_hec_debug`,`d_archive` or other file-based destination; all of these will +consume significant local disk space. -Here are some options for obtaining raw logs for one or more sourcetypes: +`d_hec_debug` and `d_archive` are organized by sourcetype; the `du -sh *` command can be used in each subdirectory to find the culprit. -* Run `tcpdump` on the collection interface and display the results in ASCII. 
You will see events of the form +* Try rebuilding sc4s volume +``` +podman volume rm splunk-sc4s-var +podman volume create splunk-sc4s-var ``` -<165>1 2007-02-15T09:17:15.719Z router1 mgd 3046 UI_DBASE_LOGOUT_EVENT [junos@2636.1.1.1.2.18 username="user"] User 'user' exiting configuration mode +* Try pruning containers ``` -buried in the packet contents. +podman system prune [--all] +``` -* Set the variable `SC4S_SOURCE_STORE_RAWMSG=yes` in `env_file` and restart sc4s. This will store the raw message in a syslog-ng macro called -`RAWMSG` and will be displayed in Splunk for all `fallback` messages. For most other sourcetypes, the `RAWMSG` is _not_ displayed, but can be -surfaced by changing the output template to one of the JSON variants (t_JSON_3164 or t_JSON_5424 depending on RFC message type). See -[SC4S metadata configuration](https://splunk-connect-for-syslog.readthedocs.io/en/develop/configuration/#sc4s-metadata-configuration) for -more details. +## SC4S/kernel UDP Input Buffer Settings -** IMPORTANT! Be sure to turn off the `RAWMSG` variable when you are finished, as it doubles the memory and disk requirements of sc4s. Do not -use in production! +SC4S has a setting that requests a certain buffer size when configuring the UDP sockets. The kernel must have its parameters set to at least the +same size (or greater) than the syslog-ng config is requesting, or the following will occur in the SC4S logs: -* Lastly, you can enable the alternate destination `d_rawmsg` for one or more sourcetypes. This destination will write the raw messages to the -container directory `/opt/syslog-ng/var/archive/rawmsg/` (which is typically mapped locally to `/opt/sc4s/archive`). -Within this directory, the logs are organized by host and time. This method can be useful when raw samples are needed for events that -partially parse (or parse into the wrong sourcetype) and the output template is not JSON (see above). +```bash +/usr/bin/ logs SC4S +``` +Note the output. 
The following warning message is not a failure condition unless we are reaching the upper limit of hardware performance. +``` +The kernel refused to set the receive buffer (SO_RCVBUF) to the requested size, you probably need to adjust buffer related kernel parameters; so_rcvbuf='1703936', so_rcvbuf_set='425984' +``` +Make changes to /etc/sysctl.conf. Changing receive buffer values here to 16 MB: -## "exec" into the container (advanced) +``` +net.core.rmem_default = 1703936 +net.core.rmem_max = 1703936. +``` +Run following commands for changes to be affected. +``` +sysctl -p restart SC4S +``` + +## SC4S TLS Listener Validation + +To verify the correct configuration of the TLS server use the following command. Replace the IP, FQDN, +and port as appropriate: -You can confirm how the templating process created the actual syslog-ng config files that are in use by "exec'ing in" to the container -and navigating the syslog-ng config filesystem directly. To do this, run ```bash -/usr/bin/podman exec -it SC4S /bin/bash + run -ti drwetter/testssl.sh --severity MEDIUM --ip 127.0.0.1 selfsigned.example.com:6510 ``` -and navigate to `/opt/syslog-ng/etc/` to see the actual config files in use. If you are adept with container operations and syslog-ng -itself, you can modify files directly and reload syslog-ng with the command `kill -1 1` in the container. -You can also run the `/entrypoint.sh` script by hand (or a subset of it, such as everything -but syslog-ng) and have complete control over the templating and underlying syslog-ng process. -This is an advanced topic and futher help can be obtained via the github issue tracker and Slack channels. -When debugging a configuration syntax issue at startup the container must remain running. This can be enabled by adding `SC4S_DEBUG_CONTAINER=yes` to the `env_file`. +## Timezone mismatch in events +By default, SC4S resolves the timezone to GMT. 
If customers have a preference to use local TZ then set the user TZ preference in Splunk during search time rather than at index time.
[Timezone config documentation](https://docs.splunk.com/Documentation/Splunk/8.0.4/Data/ApplyTimezoneOffsetstotimestamps)

## Dealing with non RFC-5424 compliant sources

@@ -181,5 +199,5 @@ logs would be RFC-5424 compliant. Alternatively, an exception could be added to path created) for the data source if the vendor can’t/won’t fix the defect.

In this example, the reason `RAWMSG` is not shown in the fields above is because this error message is coming from syslog-ng itself -- -not the filter/log path. In messages of the type `Error processing log message:` where the PROGRAM is shown as `syslog-ng`, that is the +_not_ the filter/log path. In messages of the type `Error processing log message:` where the PROGRAM is shown as `syslog-ng`, that is the clue your incoming message is not RFC-5424 compliant (though it's often close, as is the case here). diff --git a/docs/troubleshooting/troubleshoot_resources.md b/docs/troubleshooting/troubleshoot_resources.md new file mode 100644 index 0000000..c409aab --- /dev/null +++ b/docs/troubleshooting/troubleshoot_resources.md @@ -0,0 +1,81 @@ +# SC4S Logging and Troubleshooting Resources + +## Helpful Linux and Container Commands + +### Linux service (systemd) commands + +- Check service status `systemctl status sc4s` +- Start service `systemctl start service` +- Stop service `systemctl stop service` +- Restart service `systemctl restart service` +- Enabling service at boot `systemctl enable sc4s` +- Query the system journal `journalctl -b -u sc4s` + +### Container Commands + +* NOTE: All container commands below can be run with either runtime (`podman` or `docker`). 
+
+- Container logs `sudo podman logs SC4S`
+- Exec into SC4S container `podman exec -it SC4S bash`
+- Rebuilding SC4S volume
+```
+podman volume rm splunk-sc4s-var
+podman volume create splunk-sc4s-var
+```
+- Pull an image or a repository from a registry `podman pull splunk:scs:latest`
+- Remove unused data `podman system prune`
+- Load an image from a tar archive or STDIN `podman load `
+
+## Obtaining "On-the-wire" Raw Events
+
+In almost all cases during development or troubleshooting, you will need to obtain samples of the messages exactly as they are received by
+SC4S. These "raw" events contain the full syslog message (including the `` preamble) and differs from those that appear in Splunk after
+processing by sc4s and/or Splunk. This is the only way to determine if SC4S parsers and filters are operating correctly, as raw messages are
+needed for "playback" when testing. In addition, the community supporting SC4S will always first ask for raw samples (kind of like the way
+Splunk support always asks for "diags") before any development or troubleshooting exercise.
+
+Here are some options for obtaining raw logs for one or more sourcetypes:
+
+* Run `tcpdump` on the collection interface and display the results in ASCII. You will see events of the form
+```
+<165>1 2007-02-15T09:17:15.719Z router1 mgd 3046 UI_DBASE_LOGOUT_EVENT [junos@2636.1.1.1.2.18 username="user"] User 'user' exiting configuration mode
+```
+buried in the packet contents.
+
+* Set the variable `SC4S_SOURCE_STORE_RAWMSG=yes` in `env_file` and restart sc4s. This will store the raw message in a syslog-ng macro called
+`RAWMSG` and will be displayed in Splunk for all `fallback` messages. For most other sourcetypes, the `RAWMSG` is _not_ displayed, but can be
+surfaced by changing the output template to one of the JSON variants (t_JSON_3164 or t_JSON_5424 depending on RFC message type). 
See
+[SC4S metadata configuration](https://splunk-connect-for-syslog.readthedocs.io/en/develop/configuration/#sc4s-metadata-configuration) for
+more details.
+
+** IMPORTANT! Be sure to turn off the `RAWMSG` variable when you are finished, as it doubles the memory and disk requirements of sc4s. Do not
+use `RAWMSG` in production!
+
+* Lastly, you can enable the alternate destination `d_rawmsg` for one or more sourcetypes. This destination will write the raw messages to the
+container directory `/opt/syslog-ng/var/archive/rawmsg/` (which is typically mapped locally to `/opt/sc4s/archive`).
+Within this directory, the logs are organized by host and time. This method can be useful when raw samples are needed for events that
+partially parse (or parse into the wrong sourcetype) and the output template is not JSON (see above).
+
+## "exec" into the container (advanced)
+
+You can confirm how the templating process created the actual syslog-ng config files that are in use by "exec'ing in" to the container
+and navigating the syslog-ng config filesystem directly. To do this, run
+```bash
+/usr/bin/podman exec -it SC4S /bin/bash
+```
+and navigate to `/opt/syslog-ng/etc/` to see the actual config files in use. If you are adept with container operations and syslog-ng
+itself, you can modify files directly and reload syslog-ng with the command `kill -1 1` in the container.
+You can also run the `/entrypoint.sh` script by hand (or a subset of it, such as everything
+but syslog-ng) and have complete control over the templating and underlying syslog-ng process.
+This is an advanced topic and further help can be obtained via the github issue tracker and Slack channels.
+
+## Keeping a failed container running (even more advanced)
+
+When debugging a configuration syntax issue at startup, it is often helpful to keep the container running after a syslog-ng startup failure. 
+In order to facilitate troubleshooting and make "on the fly" syslog-ng configuration changes from within a running container, the container +can be forced to remain running when syslog-ng fails to start (which normally terminates the container). This can be enabled by adding +`SC4S_DEBUG_CONTAINER=yes` to the `env_file`. Use this capability in conjunction with "exec-ing" into the container described above. + +* NOTE: Do _not_ attempt to enable the debug container mode while running out of systemd. Run the container manually from the CLI, as +`podman` or `docker` commands will be required to start, stop, and optionally clean up cruft left behind by the debug process. +Only when `SC4S_DEBUG_CONTAINER` is set to "no" (or completely unset) should systemd startup processing resume. diff --git a/mkdocs.yml b/mkdocs.yml index 0abe7e1..0d7ff19 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -8,6 +8,7 @@ nav: - "Docker CE + systemd": "gettingstarted/docker-systemd-general.md" - "Docker CE + Swarm": "gettingstarted/docker-swarm-general.md" - "Docker CE + Swarm RHEL 7.7": "gettingstarted/docker-swarm-rhel7.md" + - "MicroK8s + Linux": "gettingstarted/k8s-microk8s.md" - "Bring your own Envionment": "gettingstarted/byoe-rhel7.md" - "Quickstart Guide": "gettingstarted/quickstart_guide.md" - Configuration: "configuration.md" @@ -15,7 +16,7 @@ nav: - Development: "developing/index.md" - Sources: - About: sources/index.md - - Brocade: sources/Brocade/index.md + - Brocade: sources/Brocade/index.md - Checkpoint: sources/Checkpoint/index.md - Cisco: sources/Cisco/index.md - Citrix: sources/Citrix/index.md @@ -42,7 +43,9 @@ nav: - VMware: sources/VMWare/index.md - Zscaler: sources/Zscaler/index.md - Performance: "performance.md" - - Troubleshooting: "troubleshooting.md" + - Troubleshooting: + - SC4S Startup and Validation: "troubleshooting/troubleshoot_SC4S_server.md" + - SC4S Logging and Troubleshooting Resources: "troubleshooting/troubleshoot_resources.md" - "Upgrading SC4S": 
"upgrade.md" - "SC4S FAQ": "faq.md" diff --git a/package/etc/conf.d/conflib/_common/syslog_format.conf b/package/etc/conf.d/conflib/_common/syslog_format.conf index 69652b9..e5f19cd 100644 --- a/package/etc/conf.d/conflib/_common/syslog_format.conf +++ b/package/etc/conf.d/conflib/_common/syslog_format.conf @@ -66,4 +66,8 @@ filter f_msg_is_tcp_json{ match("rfc3164_json" value("fields.sc4s_syslog_format")) or match("tcp_json" value("fields.sc4s_syslog_format")) +}; + +filter f_host_is_ip{ + host('^(((([1]?\d)?\d|2[0-4]\d|25[0-5])\.){3}(([1]?\d)?\d|2[0-4]\d|25[0-5]))|([\da-fA-F]{1,4}(\:[\da-fA-F]{1,4}){7})|(([\da-fA-F]{1,4}:){0,5}::([\da-fA-F]{1,4}:){0,5}[\da-fA-F]{1,4})$') }; \ No newline at end of file diff --git a/package/etc/conf.d/conflib/_splunk/fix_dns.conf b/package/etc/conf.d/conflib/_splunk/fix_dns.conf index c956c33..cc10ead 100644 --- a/package/etc/conf.d/conflib/_splunk/fix_dns.conf +++ b/package/etc/conf.d/conflib/_splunk/fix_dns.conf @@ -18,11 +18,16 @@ class FixHostResolver(object): # try to resolve the IP address try: - ipaddr = log_message['HOST'].decode('utf-8') + ipaddr = log_message['SOURCEIP'].decode('utf-8') hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipaddr) - name = str(hostname).split('.')[0] - log_message['HOST'] = name + #print(ipaddr) + #print(hostname) + parts=str(hostname).split('.') + name = parts[0] + #print(name) + if len(parts)>1: + log_message['HOST'] = name except: pass @@ -40,7 +45,7 @@ parser p_fix_host_resolver { parser p_add_context_host { add-contextual-data( - selector("${HOST}"), + selector("${SOURCEIP}"), database("conf.d/local/context/host.csv"), ); }; diff --git a/package/etc/conf.d/filters/checkpoint/splunk.conf b/package/etc/conf.d/filters/checkpoint/splunk.conf index cd2b8c4..ea3ff3c 100644 --- a/package/etc/conf.d/filters/checkpoint/splunk.conf +++ b/package/etc/conf.d/filters/checkpoint/splunk.conf @@ -1,63 +1,63 @@ filter f_checkpoint_splunk { 
match('\|(?:origin_sic_name|originsicname)\=[cC][nN]|\|product\=SmartConsole\|' value("MSG") type("pcre")) or match('\|(?:origin_sic_name|originsicname)\=[cC][nN]|\|product\=SmartConsole\|' value("LEGACY_MSGHDR") type("pcre")) or - match('*|product=Syslog|ifdir=inbound|loguid=*' value("MSG") type("glob")) or - match('*|product=Syslog|ifdir=inbound|loguid=*' value("LEGACY_MSGHDR") type("glob")); + match('\|product=Syslog\|ifdir=inbound\|loguid=' value("MSG")) or + match('\|product=Syslog\|ifdir=inbound\|loguid=' value("LEGACY_MSGHDR")); }; filter f_checkpoint_splunk_alerts { - match('*IOS Profile*' value('.kv.product') type('glob')) - or match('*Device*' value('.kv.product') type('glob')) + match('IOS\h+Profile' value('.kv.product')) + or match('Device' value('.kv.product')) }; filter f_checkpoint_splunk_Change { - match('*Application Control*' value('.kv.product') type('glob')) + match('Application\h+Control' value('.kv.product')) }; filter f_checkpoint_splunk_DLP { - match('*DLP*' value('.kv.product') type('glob')) + match('DLP' value('.kv.product')) }; filter f_checkpoint_splunk_email { - match('*MTA*' value('.kv.product') type('glob')) - or match('*Anti-Spam*' value('.kv.product') type('glob')) - or match('*Anti Spam*' value('.kv.product') type('glob')) + match('MTA' value('.kv.product')) + or match('Anti-Spam' value('.kv.product')) + or match('Anti\h+Spam' value('.kv.product')) }; filter f_checkpoint_splunk_IDS { - match('*IPS*' value('.kv.product') type('glob')) - or match('*WIFI*' value('.kv.product') type('glob')) - or match('*Cellular*' value('.kv.product') type('glob')) + match('IPS' value('.kv.product')) + or match('WIFI' value('.kv.product')) + or match('Cellular' value('.kv.product')) }; filter f_checkpoint_splunk_IDS_Malware { - match('*Threat Emulation*' value('.kv.product') type('glob')) - or match('*Anti-Virus*' value('.kv.product') type('glob')) - or match('*Anti-Bot*' value('.kv.product') type('glob')) - or match('*Threat Extraction*' 
value('.kv.product') type('glob')) - or match('*Anti-Ransomware*' value('.kv.product') type('glob')) - or match('*Anti-Exploit**' value('.kv.product') type('glob')) - or match('*Forensics*' value('.kv.product') type('glob')) - or match('*OS Exploit*' value('.kv.product') type('glob')) - or (match('*Application*' value('.kv.product') type('glob')) and not match('*Application Control*' value('.kv.product') type('glob'))) - or match('*Text Message*' value('.kv.product') type('glob')) - or match('*Network Access*' value('.kv.product') type('glob')) - or match('*Zero Phishing*' value('.kv.product') type('glob')) + match('Threat\h+Emulation' value('.kv.product')) + or match('Anti-Virus' value('.kv.product')) + or match('Anti-Bot' value('.kv.product')) + or match('Threat\h+Extraction' value('.kv.product')) + or match('Anti-Ransomware' value('.kv.product')) + or match('Anti-Exploit' value('.kv.product')) + or match('Forensics' value('.kv.product')) + or match('OS\h+Exploit' value('.kv.product')) + or (match('Application' value('.kv.product')) and not match('Application Control' value('.kv.product'))) + or match('Text\h+Message' value('.kv.product')) + or match('Network\h+Access' value('.kv.product')) + or match('Zero\h+Phishing' value('.kv.product')) }; filter f_checkpoint_splunk_NetworkSessions { - match('*VPN*' value('.kv.product') type('glob')) - or match('*Mobile*' value('.kv.product') type('glob')) - or match('*VPN*' value('.kv.fw_subproduct') type('glob')) + match('VPN' value('.kv.product')) + or match('Mobile' value('.kv.product')) + or match('VPN' value('.kv.fw_subproduct')) }; filter f_checkpoint_splunk_NetworkTraffic { - match('*Firewall*' value('.kv.product') type('glob')) - and not match('*VPN*' value('.kv.fw_subproduct') type('glob')) + match('Firewall' value('.kv.product')) + and not match('VPN' value('.kv.fw_subproduct')) }; filter f_checkpoint_splunk_Web { - match('*Url Filtering*' value('.kv.product') type('glob')) + match('U[rR][lL]\h+\h+Filtering' 
value('.kv.product')) }; filter f_checkpoint_splunk_syslog { - match('Syslog' value('.kv.product') type('glob')) + match('Syslog' value('.kv.product')) }; \ No newline at end of file diff --git a/package/etc/conf.d/filters/cisco/cisco_syslog.conf b/package/etc/conf.d/filters/cisco/cisco_syslog.conf index 61da774..8fe0309 100644 --- a/package/etc/conf.d/filters/cisco/cisco_syslog.conf +++ b/package/etc/conf.d/filters/cisco/cisco_syslog.conf @@ -102,17 +102,22 @@ parser cisco-parser-ex{ filter { match('^(\*|\.)$' value("7")); }; - rewrite { set("cisco reported time error : ${8}" value("fields.sc4s_error")); }; - } else { + rewrite { set("cisco reported time error : ${7}" value("fields.cisco_time_error"));}; + }; + if { + filter { + match('^\w\w\w' value("8")); + }; parser { date-parser-nofilter(format( - '%b %d %H:%M:%S.%f', - '%b %d %H:%M:%S', - '%b %d %I:%M:%S %p.%f', - '%b %d %I:%M:%S %p', - '%b %d %Y %H:%M:%S.%f', - '%b %d %Y %H:%M:%S') - template("$8")); - }; + '%b %d %H:%M:%S.%f', + '%b %d %H:%M:%S', + '%b %d %I:%M:%S %p.%f', + '%b %d %I:%M:%S %p', + '%b %d %Y %H:%M:%S.%f', + '%b %d %H:%M:%S.%f', + '%b %d %Y %H:%M:%S') + template("$8")); + }; }; } else { #Cisco AireOS format diff --git a/package/etc/conf.d/filters/citrix/netscalersdx.conf.tmpl b/package/etc/conf.d/filters/citrix/netscalersdx.conf.tmpl index ee9d403..068e380 100644 --- a/package/etc/conf.d/filters/citrix/netscalersdx.conf.tmpl +++ b/package/etc/conf.d/filters/citrix/netscalersdx.conf.tmpl @@ -4,10 +4,21 @@ filter f_citrix_netscaler_sdx_message { flags(store-matches) ); }; +filter f_citrix_netscaler_sdx_AAAmessage { + message( + '^(<\d{1,3}>) ?(\w{1,3} {1,2}\d{1,2} \d{2}:\d{2}:\d{2}) (\[\d+\]: AAA Message :.*)' + flags(store-matches) + ); +}; rewrite r_citrix_netscaler_sdx_message { set("citrix_netscaler" value("fields.sc4s_syslog_format")); set("citrix_netscaler" value("fields.sc4s_vendor_product")); set("$5" value("HOST")); set("$3" value("MESSAGE")); +}; +rewrite 
r_citrix_netscaler_sdx_AAAmessage { + set("citrix_netscaler" value("fields.sc4s_syslog_format")); + set("citrix_netscaler" value("fields.sc4s_vendor_product")); + set("$3" value("MESSAGE")); }; \ No newline at end of file diff --git a/package/etc/conf.d/filters/f5/bigip.conf.tmpl b/package/etc/conf.d/filters/f5/bigip.conf.tmpl index 7215a90..6180cb9 100644 --- a/package/etc/conf.d/filters/f5/bigip.conf.tmpl +++ b/package/etc/conf.d/filters/f5/bigip.conf.tmpl @@ -1,6 +1,7 @@ filter f_f5_bigip { match("^f5_bigip", value("fields.sc4s_vendor_product")) or match('^\[F5@12276' value("SDATA")) + or program('iControlPortal.cgi') or program("tmsh") or program("mcpd") or program("mprov") @@ -12,7 +13,7 @@ filter f_f5_bigip { }; filter f_f5_bigip_irule { - message("*,f5_irule=*" type(glob)); + message('\,f5_irule\='); }; filter f_f5_bigip_message { diff --git a/package/etc/conf.d/log_paths/lp-cisco_acs.conf.tmpl b/package/etc/conf.d/log_paths/lp-cisco_acs.conf.tmpl index fc1b7a7..eb3ca7b 100644 --- a/package/etc/conf.d/log_paths/lp-cisco_acs.conf.tmpl +++ b/package/etc/conf.d/log_paths/lp-cisco_acs.conf.tmpl @@ -104,4 +104,4 @@ log { flags(flow-control,final); }; -}; +}; \ No newline at end of file diff --git a/package/etc/conf.d/log_paths/lp-common_event_format.conf.tmpl b/package/etc/conf.d/log_paths/lp-common_event_format.conf.tmpl index 2dac3d6..506aeba 100644 --- a/package/etc/conf.d/log_paths/lp-common_event_format.conf.tmpl +++ b/package/etc/conf.d/log_paths/lp-common_event_format.conf.tmpl @@ -16,12 +16,20 @@ parser p_cef_header { }; parser p_cef_ts_rt { - date-parser-nofilter(format('%s') + date-parser-nofilter(format( + '%s.%f', + '%s', + '%b %d %H:%M:%S', + '%b %d %Y %H:%M:%S') template("${.cef.rt}") ); }; parser p_cef_ts_end { - date-parser-nofilter(format('%s') + date-parser-nofilter(format( + '%s.%f', + '%s', + '%b %d %H:%M:%S', + '%b %d %Y %H:%M:%S') template("${.cef.end}") ); }; @@ -66,8 +74,14 @@ log { # If we have an rt or end field that is best we use the 
If trick here so if this parser fails # We don't get sent to fallback. if { + filter{ + match('^.', value('.cef.rt')) + }; parser (p_cef_ts_rt); } elif { + filter{ + match('^.', value('.cef.end')) + }; parser (p_cef_ts_end); } else { }; #Do nothing this is allows for both rt and end to be missing and still pass with the message ts diff --git a/package/etc/conf.d/log_paths/lp-dell_rsa_secureid.conf.tmpl b/package/etc/conf.d/log_paths/lp-dell_rsa_secureid.conf.tmpl index 7ca852e..44ec5f0 100644 --- a/package/etc/conf.d/log_paths/lp-dell_rsa_secureid.conf.tmpl +++ b/package/etc/conf.d/log_paths/lp-dell_rsa_secureid.conf.tmpl @@ -37,7 +37,7 @@ log { #parse the date date-parser-nofilter(format( '%Y-%m-%d %H:%M:%S,%f') - template("${.rsa.time} ${.rsa.ms}") + template("${LEGACY_MSGHDR} ${.rsa.time},${.rsa.ms}") ); }; if { diff --git a/package/etc/conf.d/sources/rfc5687.conf.tmpl b/package/etc/conf.d/sources/rfc5687.conf.tmpl index b5044b9..43fddc8 100644 --- a/package/etc/conf.d/sources/rfc5687.conf.tmpl +++ b/package/etc/conf.d/sources/rfc5687.conf.tmpl @@ -5,7 +5,6 @@ source s_ietf { transport("tcp") port(601) ip-protocol(4) - keep-hostname(yes) keep-timestamp(yes) use-dns(no) use-fqdn(no) diff --git a/package/etc/go_templates/source_network.t b/package/etc/go_templates/source_network.t index cdc1a2f..8a9408c 100644 --- a/package/etc/go_templates/source_network.t +++ b/package/etc/go_templates/source_network.t @@ -42,7 +42,6 @@ source s_{{ .port_id }} { max-connections({{getenv "SC4S_SOURCE_TCP_MAX_CONNECTIONS" "2000"}}) log-iw-size({{getenv "SC4S_SOURCE_TCP_IW_SIZE" "20000000"}}) log-fetch-limit({{getenv "SC4S_SOURCE_TCP_FETCH_LIMIT" "2000"}}) - keep-hostname(yes) keep-timestamp(yes) use-dns(no) use-fqdn(no) @@ -60,17 +59,17 @@ source s_{{ .port_id }} { max-connections({{getenv "SC4S_SOURCE_TCP_MAX_CONNECTIONS" "2000"}}) log-iw-size({{getenv "SC4S_SOURCE_TCP_IW_SIZE" "20000000"}}) log-fetch-limit({{getenv "SC4S_SOURCE_TCP_FETCH_LIMIT" "2000"}}) - keep-hostname(yes) 
keep-timestamp(yes) use-dns(no) use-fqdn(no) chain-hostnames(off) flags(validate-utf8, no-parse {{- if (conv.ToBool (getenv "SC4S_SOURCE_STORE_RAWMSG" "no")) }} store-raw-message {{- end}}) - tls(allow-compress(yes) + tls(allow-compress(yes) key-file("/opt/syslog-ng/tls/server.key") cert-file("/opt/syslog-ng/tls/server.pem") ssl-options({{- getenv "SC4S_SOURCE_TLS_OPTIONS" "no-sslv2, no-sslv3, no-tlsv1" }}) cipher-suite("{{- getenv "SC4S_SOURCE_TLS_CIPHER_SUITE" "HIGH:!aNULL:!eNULL:!kECDH:!aDH:!RC4:!3DES:!CAMELLIA:!MD5:!PSK:!SRP:!KRB5:@STRENGTH" }}") + peer-verify(no) ) ); {{- end }} @@ -125,6 +124,13 @@ source s_{{ .port_id }} { template("$2")); }; rewrite(r_citrix_netscaler_sdx_message); + } elif { + filter(f_citrix_netscaler_sdx_AAAmessage); + parser { + date-parser-nofilter(format('%b %d %H:%M:%S') + template("$2")); + }; + rewrite(r_citrix_netscaler_sdx_AAAmessage); }; {{ else if eq .parser "cisco_ucm" }} parser (p_cisco_ucm_date); @@ -157,6 +163,13 @@ source s_{{ .port_id }} { template("$2")); }; rewrite(r_citrix_netscaler_sdx_message); + } elif { + filter(f_citrix_netscaler_sdx_AAAmessage); + parser { + date-parser-nofilter(format('%b %d %H:%M:%S') + template("$2")); + }; + rewrite(r_citrix_netscaler_sdx_AAAmessage); } elif { filter(f_f5_bigip_message); rewrite{ @@ -253,15 +266,11 @@ source s_{{ .port_id }} { rewrite(r_set_splunk_default); {{ if eq (getenv "SC4S_USE_REVERSE_DNS" "yes") "yes" }} if { - filter { - 
host('((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))') - }; + filter(f_host_is_ip); parser(p_add_context_host); }; if { - filter { - 
host('((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))') - }; + filter(f_host_is_ip); parser(p_fix_host_resolver); }; {{ end }} diff --git a/package/sbin/entrypoint.sh b/package/sbin/entrypoint.sh index 73657c5..02250b8 100755 --- a/package/sbin/entrypoint.sh +++ b/package/sbin/entrypoint.sh @@ -48,35 +48,49 @@ trap 'kill ${!}; term_handler' SIGTERM mkdir -p /opt/syslog-ng/etc/conf.d/local/context/ mkdir -p /opt/syslog-ng/etc/conf.d/local/config/ + + cp /opt/syslog-ng/etc/context_templates/* /opt/syslog-ng/etc/conf.d/local/context for file in /opt/syslog-ng/etc/conf.d/local/context/*.example ; do cp --verbose -n $file ${file%.example}; done +if [ "$SC4S_RUNTIME_ENV" == "k8s" ] +then + mkdir -p /opt/syslog-ng/etc/conf.d/configmap/context/ + mkdir -p /opt/syslog-ng/etc/conf.d/configmap/config/ + # Add new entries + temp_file=$(mktemp) + 
awk '{print $0}' /opt/syslog-ng/etc/conf.d/configmap/context/splunk_metadata.csv /opt/syslog-ng/etc/context_templates/splunk_metadata.csv.example | grep -v '^#' | sort -b -t ',' -k1,2 -u > $temp_file + cp -f $temp_file /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv + +else + # splunk_index.csv updates + # Remove comment headers from existing config + touch /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv + if [ -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv ]; then + LEGACY_SPLUNK_INDEX_FILE=/opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv + fi -# splunk_index.csv updates -# Remove comment headers from existing config -touch /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv -if [ -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv ]; then - LEGACY_SPLUNK_INDEX_FILE=/opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv -fi -# Add new entries -temp_file=$(mktemp) -awk '{print $0}' ${LEGACY_SPLUNK_INDEX_FILE} /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv /opt/syslog-ng/etc/context_templates/splunk_metadata.csv.example | grep -v '^#' | sort -b -t ',' -k1,2 -u > $temp_file -cp -f $temp_file /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv -# We don't need this file any longer -rm -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv.example || true -if [ -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv ]; then - cp -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv /opt/syslog-ng/etc/conf.d/local/context/splunk_index.deprecated - rm /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv -fi -cp --verbose -R -f /opt/syslog-ng/etc/local_config/* /opt/syslog-ng/etc/conf.d/local/config/ + # Add new entries + temp_file=$(mktemp) + awk '{print $0}' ${LEGACY_SPLUNK_INDEX_FILE} /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv /opt/syslog-ng/etc/context_templates/splunk_metadata.csv.example | grep -v '^#' | sort -b -t ',' -k1,2 -u > $temp_file + cp -f 
$temp_file /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv + # We don't need this file any longer + rm -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv.example || true + if [ -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv ]; then + cp -f /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv /opt/syslog-ng/etc/conf.d/local/context/splunk_index.deprecated + rm /opt/syslog-ng/etc/conf.d/local/context/splunk_index.csv + fi + cp --verbose -R -f /opt/syslog-ng/etc/local_config/* /opt/syslog-ng/etc/conf.d/local/config/ +fi mkdir -p /opt/syslog-ng/var/log # Test HEC Connectivity if [ "$SC4S_DEST_SPLUNK_HEC_GLOBAL" != "no" ] then HEC=$(echo '{{- getenv "SPLUNK_HEC_URL" | strings.ReplaceAll "/services/collector" "" | strings.ReplaceAll "/event" "" | regexp.ReplaceLiteral "[, ]+" "/services/collector/event " }}/services/collector/event' | gomplate | cut -d' ' -f 1) + NO_VERIFY=$(echo '{{- if not (conv.ToBool (getenv "SC4S_DEST_SPLUNK_HEC_TLS_VERIFY" "yes")) }}-k{{- end}}' | gomplate) SC4S_DEST_SPLUNK_HEC_FALLBACK_INDEX=$(cat /opt/syslog-ng/etc/conf.d/local/context/splunk_metadata.csv | grep ',index,' | grep sc4s_events | cut -d, -f 3) export SC4S_DEST_SPLUNK_HEC_FALLBACK_INDEX - if curl -s -S -k "${HEC}?/index=${SC4S_DEST_SPLUNK_HEC_FALLBACK_INDEX}" -H "Authorization: Splunk ${SPLUNK_HEC_TOKEN}" -d '{"event": "HEC TEST EVENT", "sourcetype": "SC4S:PROBE"}' 2>&1 | grep -v '{"text":"Success","code":0}' + if curl -s -S ${NO_VERIFY} "${HEC}?/index=${SC4S_DEST_SPLUNK_HEC_FALLBACK_INDEX}" -H "Authorization: Splunk ${SPLUNK_HEC_TOKEN}" -d '{"event": "HEC TEST EVENT", "sourcetype": "SC4S:PROBE"}' 2>&1 | grep -v '{"text":"Success","code":0}' then echo -e "SC4S_ENV_CHECK_HEC: Invalid Splunk HEC URL, invalid token, or other HEC connectivity issue.\nStartup will continue to prevent data loss if this is a transient failure." 
else diff --git a/tests/test_cisco_acs.py b/tests/test_cisco_acs.py index a3b89f0..5ec5b6e 100644 --- a/tests/test_cisco_acs.py +++ b/tests/test_cisco_acs.py @@ -13,6 +13,7 @@ env = Environment() + def test_cisco_acs_single(record_property, setup_wordlist, setup_splunk, setup_sc4s): host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) @@ -24,14 +25,18 @@ def test_cisco_acs_single(record_property, setup_wordlist, setup_splunk, setup_s tzoffset = tzoffset[0:3] + ":" + tzoffset[3:] epoch = epoch[:-3] - mt = env.from_string( - "{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0765855540 1 0 {{ date }} {{ time }} {{ tzoffset }} 0178632943 5202 NOTICE Device-Administration: Command Authorization succeeded, ACSVersion=acs-5.8.1.4-B.462.x86_64, ConfigVersionId=16489, Device IP Address=10.0.0.93, DestinationIPAddress=10.0.0.10, DestinationPort=49, UserName=nsdevman, CmdSet=[ CmdAV=show CmdArgAV=vpn-sessiondb CmdArgAV=full CmdArgAV=ra-ikev2-ipsec ], Protocol=Tacacs, MatchedCommandSet=fw3, RequestLatency=11, Type=Authorization, Privilege-Level=15, Authen-Type=ASCII, Service=None, User=nsdevman, Port=443, Remote-Address=10.0.0.15, Authen-Method=TacacsPlus, Service-Argument=shell, AcsSessionID=mnsvdcfpiuac03/359448835/9871764, AuthenticationIdentityStore=AD1, AuthenticationMethod=Lookup, SelectedAccessService=Default Device Admin, SelectedCommandSet=fw3, IdentityGroup=IdentityGroup:All Groups:SystemID, Step=13005 , Step=15008 , Step=15004 , Step=15012 , Step=15041 , Step=15004 , Step=15013 , Step=24210 , Step=24212 , Step=24432 , Step=24325 , Step=24313 , Step=24319 , Step=24323 , Step=24420 , Step=24355 , Step=24416 , Step=22037 , Step=15044 , Step=15035 , Step=15042 , Step=15036 , Step=15004 , Step=15018 , Step=13024 , Step=13034 , SelectedAuthenticationIdentityStores=Internal Users, NetworkDeviceName=devicenamehere, NetworkDeviceGroups=Device Type:All Device Types:Firewall:Cisco Systems:Firewall:ASA5545, 
NetworkDeviceGroups=Location:All Locations:MN, ServiceSelectionMatchedRule=TACACS, IdentityPolicyMatchedRule=Firewall, AuthorizationPolicyMatchedRule=nsdevman, AD-User-Candidate-Identities=nsdevman@ent.example.corp, AD-User-DNS-Domain=ent.example.corp, AD-User-NetBios-Name=AD-ENT, AD-User-Resolved-Identities=nsdevman@ent.example.corp, AD-User-Join-Point=ENT.example.CORP, AD-User-Resolved-DNs=CN=nsdevman\,OU=Service Accounts\,OU=CAO\,OU=ENT\,DC=ent\,DC=wfb\,DC=example\,DC=corp, StepData=10=nsdevman, StepData=11=ent.example.corp, StepData=12=example.corp, StepData=15=ent.example.corp, AD-Domain=ent.example.corp, IdentityAccessRestricted=false, UserIdentityGroup=IdentityGroup:All Groups:SystemID, Cisco-Firewall=Superuser, Firewall=Superuser, NetSec-CSM=User, NetSec-Logging=Engineer, Response={Type=Authorization; Author-Reply-Status=PassAdd; ExternalIdentityStoreName=AD1; }\n") - message = mt.render(mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset) + "{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0765855540 1 0 {{ date }} {{ time }} {{ tzoffset }} 0178632943 5202 NOTICE Device-Administration: Command Authorization succeeded, ACSVersion=acs-5.8.1.4-B.462.x86_64, ConfigVersionId=16489, Device IP Address=10.0.0.93, DestinationIPAddress=10.0.0.10, DestinationPort=49, UserName=nsdevman, CmdSet=[ CmdAV=show CmdArgAV=vpn-sessiondb CmdArgAV=full CmdArgAV=ra-ikev2-ipsec ], Protocol=Tacacs, MatchedCommandSet=fw3, RequestLatency=11, Type=Authorization, Privilege-Level=15, Authen-Type=ASCII, Service=None, User=nsdevman, Port=443, Remote-Address=10.0.0.15, Authen-Method=TacacsPlus, Service-Argument=shell, AcsSessionID=mnsvdcfpiuac03/359448835/9871764, AuthenticationIdentityStore=AD1, AuthenticationMethod=Lookup, SelectedAccessService=Default Device Admin, SelectedCommandSet=fw3, IdentityGroup=IdentityGroup:All Groups:SystemID, Step=13005 , Step=15008 , Step=15004 , Step=15012 , Step=15041 , Step=15004 , Step=15013 , Step=24210 , Step=24212 
, Step=24432 , Step=24325 , Step=24313 , Step=24319 , Step=24323 , Step=24420 , Step=24355 , Step=24416 , Step=22037 , Step=15044 , Step=15035 , Step=15042 , Step=15036 , Step=15004 , Step=15018 , Step=13024 , Step=13034 , SelectedAuthenticationIdentityStores=Internal Users, NetworkDeviceName=devicenamehere, NetworkDeviceGroups=Device Type:All Device Types:Firewall:Cisco Systems:Firewall:ASA5545, NetworkDeviceGroups=Location:All Locations:MN, ServiceSelectionMatchedRule=TACACS, IdentityPolicyMatchedRule=Firewall, AuthorizationPolicyMatchedRule=nsdevman, AD-User-Candidate-Identities=nsdevman@ent.example.corp, AD-User-DNS-Domain=ent.example.corp, AD-User-NetBios-Name=AD-ENT, AD-User-Resolved-Identities=nsdevman@ent.example.corp, AD-User-Join-Point=ENT.example.CORP, AD-User-Resolved-DNs=CN=nsdevman\,OU=Service Accounts\,OU=CAO\,OU=ENT\,DC=ent\,DC=wfb\,DC=example\,DC=corp, StepData=10=nsdevman, StepData=11=ent.example.corp, StepData=12=example.corp, StepData=15=ent.example.corp, AD-Domain=ent.example.corp, IdentityAccessRestricted=false, UserIdentityGroup=IdentityGroup:All Groups:SystemID, Cisco-Firewall=Superuser, Firewall=Superuser, NetSec-CSM=User, NetSec-Logging=Engineer, Response={Type=Authorization; Author-Reply-Status=PassAdd; ExternalIdentityStoreName=AD1; }\n" + ) + message = mt.render( + mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - st = env.from_string("search _time={{ epoch }} index=netauth host=\"{{ host }}\" sourcetype=\"cisco:acs\"") + st = env.from_string( + 'search _time={{ epoch }} index=netauth host="{{ host }}" sourcetype="cisco:acs"' + ) search = st.render(host=host, epoch=epoch) resultCount, eventCount = splunk_single(setup_splunk, search) @@ -42,6 +47,7 @@ def test_cisco_acs_single(record_property, setup_wordlist, setup_splunk, setup_s assert resultCount == 1 + def test_cisco_acs_multi(record_property, setup_wordlist, setup_splunk, setup_sc4s): host = 
"{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) @@ -54,20 +60,16 @@ def test_cisco_acs_multi(record_property, setup_wordlist, setup_splunk, setup_sc epoch = epoch[:-3] mt = env.from_string( - "{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0000000002 2 0 {{ date }} {{ time }} {{ tzoffset }} 0000008450 5203 NOTICE Device-Administration: Session Authorization succeeded, ACSVersion=acs-5.2.0.26-B.3075, ConfigVersionId=117, Device IP Address=192.168.26.137, UserName=edward, CmdSet=[ CmdAV= ], Protocol=Tacacs, RequestLatency=10, NetworkDeviceName=switch, Type=Authorization, Privilege-Level=1, Authen-Type=ASCII, Service=Login, User=edward, Port=tty2, Remote-Address=10.78.167.190, Authen-Method=TacacsPlus, Service-Argument=shell, AcsSessionID=ACS41/101085887/112, AuthenticationIdentityStore=Internal Users, AuthenticationMethod=Lookup, SelectedAccessService=Default Device Admin, SelectedShellProfile=Permit Access, IdentityGroup=IdentityGroup:All Groups, Step=13005 , Step=15008 , Step=15004 , Step=15012 , Step=15041 , Step=15006 , Step=15013 , Step=24210 , Step=24212 , Step=22037 , Step=15044 , Step=15035 , Step=15042 , Step=15036 , Step=15004 , Step=15017 , Step=13034 , \n") - message = mt.render(mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset) - sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - - # Generate new datetime for second message; not used in log path parser so actually could be anything - dt = datetime.datetime.now() + datetime.timedelta(seconds=1) - bsd = dt.strftime("%b %d %H:%M:%S") - - mt = env.from_string( - "{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0000000002 2 1 Step=13015 , SelectedAuthenticationIdentityStores=Internal Users, NetworkDeviceGroups=s1Migrated_NDGs:All s1Migrated_NDGs, NetworkDeviceGroups=Device Type:All Device Types, NetworkDeviceGroups=Location:All Locations, ServiceSelectionMatchedRule=Rule-2, IdentityPolicyMatchedRule=Default, 
AuthorizationPolicyMatchedRule=Rule-0, Action=Login, Privilege-Level=1, Authen-Type=ASCII, Service=Login, Remote-Address=10.78.167.190, UserIdentityGroup=IdentityGroup:All\n") - message = mt.render(mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset) + "{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0000000002 2 0 {{ date }} {{ time }} {{ tzoffset }} 0000008450 5203 NOTICE Device-Administration: Session Authorization succeeded, ACSVersion=acs-5.2.0.26-B.3075, ConfigVersionId=117, Device IP Address=192.168.26.137, UserName=edward, CmdSet=[ CmdAV= ], Protocol=Tacacs, RequestLatency=10, NetworkDeviceName=switch, Type=Authorization, Privilege-Level=1, Authen-Type=ASCII, Service=Login, User=edward, Port=tty2, Remote-Address=10.78.167.190, Authen-Method=TacacsPlus, Service-Argument=shell, AcsSessionID=ACS41/101085887/112, AuthenticationIdentityStore=Internal Users, AuthenticationMethod=Lookup, SelectedAccessService=Default Device Admin, SelectedShellProfile=Permit Access, IdentityGroup=IdentityGroup:All Groups, Step=13005 , Step=15008 , Step=15004 , Step=15012 , Step=15041 , Step=15006 , Step=15013 , Step=24210 , Step=24212 , Step=22037 , Step=15044 , Step=15035 , Step=15042 , Step=15036 , Step=15004 , Step=15017 , Step=13034 , \n{{ mark }} {{ bsd }} {{ host }} CSCOacs_Passed_Authentications 0000000002 2 1 Step=13015 , SelectedAuthenticationIdentityStores=Internal Users, NetworkDeviceGroups=s1Migrated_NDGs:All s1Migrated_NDGs, NetworkDeviceGroups=Device Type:All Device Types, NetworkDeviceGroups=Location:All Locations, ServiceSelectionMatchedRule=Rule-2, IdentityPolicyMatchedRule=Default, AuthorizationPolicyMatchedRule=Rule-0, Action=Login, Privilege-Level=1, Authen-Type=ASCII, Service=Login, Remote-Address=10.78.167.190, UserIdentityGroup=IdentityGroup:All\n" + ) + message = mt.render( + mark="<165>", bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset + ) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - st = 
env.from_string("search _time={{ epoch }} index=netauth host=\"{{ host }}\" sourcetype=\"cisco:acs\" \"Step=13015\"") + st = env.from_string( + 'search _time={{ epoch }} index=netauth host="{{ host }}" sourcetype="cisco:acs" "Step=13015"' + ) search = st.render(host=host, epoch=epoch) resultCount, eventCount = splunk_single(setup_splunk, search) diff --git a/tests/test_cisco_wsa.py b/tests/test_cisco_wsa.py index ff3e1f8..19235cb 100644 --- a/tests/test_cisco_wsa.py +++ b/tests/test_cisco_wsa.py @@ -10,9 +10,9 @@ testdata_squid_11_7 = [ -'{{ mark }}{{ bsd }} {{ host }} 1588851315.000 382 10.0.0.13 TCP_CLIENT_REFRESH_MISS_SSL/201 4646 GET http://test_web.com/page2/b.txt Conner_Fitzerald DEFAULT_PARENT/www.xxxxxxx14.com application/x-javascript OTHER_382-NONE-CyberRange_Inside_NoAuth-OMSPolicy-random_policy-random_policy-DIRECT "Anonymous_Suspect_Vendor" "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52" - -', -'{{ mark }}{{ bsd }} {{ host }} 1588850982.000 331 10.0.0.12 TCP_DENIED/403 3197 POST http://test_web.net/contents/content5.jpg Tom_Lawrence DIRECT/www.xxxxxxx7.com application/x-javascript DEFAULT_CASE_331-Auth-APJC_Cisco_Corporate-OMSPolicy-DefaultGroup-NONE-DefaultRouting "Anonymous_Suspect_Vendor" "Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025" - -', -'{{ mark }}{{ bsd }} {{ host }} 1588851529.000 252 10.0.0.2 NONE/504 3040 GET http://test_web.net/users/user5.jpg Tom_Lawrence DIRECT/www.xxxxxxx5.com application/pkix-crl PASSTHRU_ADMIN_252-Decrypt_VFS-WebxOnly-RFS_Transparent_Proxy_Test-random_policy-NONE-DefaultRouting "abcd" "Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025" - -' +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 382 10.0.0.13 TCP_CLIENT_REFRESH_MISS_SSL/201 4646 GET http://test_web.com/page2/b.txt Conner_Fitzerald DEFAULT_PARENT/www.xxxxxxx14.com application/x-javascript 
OTHER_382-NONE-CyberRange_Inside_NoAuth-OMSPolicy-random_policy-random_policy-DIRECT "Anonymous_Suspect_Vendor" "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52" - -', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 331 10.0.0.12 TCP_DENIED/403 3197 POST http://test_web.net/contents/content5.jpg Tom_Lawrence DIRECT/www.xxxxxxx7.com application/x-javascript DEFAULT_CASE_331-Auth-APJC_Cisco_Corporate-OMSPolicy-DefaultGroup-NONE-DefaultRouting "Anonymous_Suspect_Vendor" "Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025" - -', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 252 10.0.0.2 NONE/504 3040 GET http://test_web.net/users/user5.jpg Tom_Lawrence DIRECT/www.xxxxxxx5.com application/pkix-crl PASSTHRU_ADMIN_252-Decrypt_VFS-WebxOnly-RFS_Transparent_Proxy_Test-random_policy-NONE-DefaultRouting "abcd" "Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025" - -' ] testdata_l4tm = [ @@ -24,13 +24,13 @@ '{{ mark }}{{ bsd }} {{ host }} Mon May 04 12:59:58 2020 Info: Time offset from UTC: 113 seconds', ] testdata_squid = [ -'{{ mark }}{{ bsd }} {{ host }} 1588851279.000 184 10.0.0.6 TCP_CLIENT_REFRESH_MISS/404 461 POST http://test_web.net/users/user2.jpg - DEFAULT_PARENT/www.xxxxxxx15.com application/javascript DEFAULT_CASE_184-NONE-CyberRange_DC_NoAuth-RFS_Transparent_Proxy_Test-random_policy-DefaultGroup-RoutingPolicy "abcd" 486', -'{{ mark }}{{ bsd }} {{ host }} 1588851133.000 258 10.0.0.12 TCP_MISS/200 4687 GET http://test_web.net/users/user2.jpg Tom_Lawrence DIRECT/www.xxxxxxx15.com image/gif BLOCK_AMW_RESP_URL_258-Allow_All_iDevices-APJC_Cisco_Corporate-RFS_Transparent_Proxy_Test-NONE-random_policy-random_policy "random_name"', -'{{ mark }}{{ bsd }} {{ host }} 1588851234.000 17 10.0.0.5 TCP_CLIENT_REFRESH_MISS_SSL/200 1939 HEAD http://test_web.net/contents/content4.jpg - NONE/www.xxxxxxx15.com application/javascript 
ALLOW_WBRS_17-AccessPolicy-CyberRange_Inside_NoAuth-RFS_Transparent_Proxy_Test-DefaultGroup-random_policy-RoutingPolicy - 486', -'{{ mark }}{{ bsd }} {{ host }} 1588850961.000 245 2001:b8f9:c5c2:f730::2 TCP_DENIED/403 0 GET http://test_web.net/users/user1.jpg Alexei_Romanov NONE/www.xxxxxxx6.com application/x-javascript BLOCK_WEBCAT_245-Allow_All_iDevices-CyberRange_Inside_NoAuth-OMSPolicy-DataSecurityPolicy-DefaultGroup-DIRECT -', -'{{ mark }}{{ bsd }} {{ host }} 1588850940.000 26 2001:44c4:cf35:1b78::6 TCP_MISS/204 4525 POST http://test_web.com/page1/a.txt Andy_Lloyd DIRECT/www.xxxxxxx3.com image/jpeg DEFAULT_CASE_26-NONE-CyberRange_Inside_NoAuth-OMSPolicy-DataSecurityPolicy-ExternalDLPolicy-RoutingPolicy "Anonymous_Suspect_Vendor" 100', -'{{ mark }}{{ bsd }} {{ host }} 1588851442.000 6 10.0.0.7 TCP_CLIENT_REFRESH_MISS/404 1932 GET http://test_web.com/page2/b.txt - DEFAULT_PARENT/www.xxxxxxx8.com - DEFAULT_CASE_6-AP_Subnet_2-NONE-RFS_Transparent_Proxy_Test-NONE-ExternalDLPolicy-RoutingPolicy <nc,5.0,-,"-",-,-,-,-,"-",-,-,-,"-",-,-,"-","-",-,-,nc,-,"-","-","Unknown","Unknown","-","-",0.63,0,-,"-","-",-,"-",-,-,"-","-"> - "03/Jan/2015:07:09:50 +1100" NONE -', -'{{ mark }}{{ bsd }} {{ host }} 1588850950.000 262 10.0.0.7 TCP_MISS_SSL/204 953 POST http://test_web.net/contents/content3.jpg Alexei_Romanov NONE/www.xxxxxxx10.com application/x-javascript DEFAULT_CASE_262-Internet_Access_with_Streaming-ID.ACMETECHISE-NONE-DefaultGroup-random_policy-RoutingPolicy "Anonymous_Suspect_Vendor" 123 "07/052020:11:29:10 +1332" NONE "Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15"' +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 184 10.0.0.6 TCP_CLIENT_REFRESH_MISS/404 461 POST http://test_web.net/users/user2.jpg - DEFAULT_PARENT/www.xxxxxxx15.com application/javascript DEFAULT_CASE_184-NONE-CyberRange_DC_NoAuth-RFS_Transparent_Proxy_Test-random_policy-DefaultGroup-RoutingPolicy "abcd" 486', +'{{ mark }}{{ bsd }} {{ host 
}} {{ wsatime }} 258 10.0.0.12 TCP_MISS/200 4687 GET http://test_web.net/users/user2.jpg Tom_Lawrence DIRECT/www.xxxxxxx15.com image/gif BLOCK_AMW_RESP_URL_258-Allow_All_iDevices-APJC_Cisco_Corporate-RFS_Transparent_Proxy_Test-NONE-random_policy-random_policy "random_name"', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 17 10.0.0.5 TCP_CLIENT_REFRESH_MISS_SSL/200 1939 HEAD http://test_web.net/contents/content4.jpg - NONE/www.xxxxxxx15.com application/javascript ALLOW_WBRS_17-AccessPolicy-CyberRange_Inside_NoAuth-RFS_Transparent_Proxy_Test-DefaultGroup-random_policy-RoutingPolicy - 486', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 245 2001:b8f9:c5c2:f730::2 TCP_DENIED/403 0 GET http://test_web.net/users/user1.jpg Alexei_Romanov NONE/www.xxxxxxx6.com application/x-javascript BLOCK_WEBCAT_245-Allow_All_iDevices-CyberRange_Inside_NoAuth-OMSPolicy-DataSecurityPolicy-DefaultGroup-DIRECT -', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 26 2001:44c4:cf35:1b78::6 TCP_MISS/204 4525 POST http://test_web.com/page1/a.txt Andy_Lloyd DIRECT/www.xxxxxxx3.com image/jpeg DEFAULT_CASE_26-NONE-CyberRange_Inside_NoAuth-OMSPolicy-DataSecurityPolicy-ExternalDLPolicy-RoutingPolicy "Anonymous_Suspect_Vendor" 100', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 6 10.0.0.7 TCP_CLIENT_REFRESH_MISS/404 1932 GET http://test_web.com/page2/b.txt - DEFAULT_PARENT/www.xxxxxxx8.com - DEFAULT_CASE_6-AP_Subnet_2-NONE-RFS_Transparent_Proxy_Test-NONE-ExternalDLPolicy-RoutingPolicy <nc,5.0,-,"-",-,-,-,-,"-",-,-,-,"-",-,-,"-","-",-,-,nc,-,"-","-","Unknown","Unknown","-","-",0.63,0,-,"-","-",-,"-",-,-,"-","-"> - "03/Jan/2015:07:09:50 +1100" NONE -', +'{{ mark }}{{ bsd }} {{ host }} {{ wsatime }} 262 10.0.0.7 TCP_MISS_SSL/204 953 POST http://test_web.net/contents/content3.jpg Alexei_Romanov NONE/www.xxxxxxx10.com application/x-javascript DEFAULT_CASE_262-Internet_Access_with_Streaming-ID.ACMETECHISE-NONE-DefaultGroup-random_policy-RoutingPolicy "Anonymous_Suspect_Vendor" 123 "07/052020:11:29:10 +1332" 
NONE "Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15"' ] @@ -40,19 +40,20 @@ def test_cisco_wsa_squid_11_7(record_property, setup_wordlist, get_host_key, set dt = datetime.datetime.now() iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) + wsatime = dt.strftime("%s.%f")[:-3] # Tune time functions - epoch = epoch[:-7] + epoch = epoch[:-3] mt = env.from_string(event + "\n") - message = mt.render(mark="<13>", bsd=bsd ,host=host ) + message = mt.render(mark="<13>", bsd=bsd ,host=host, wsatime=wsatime) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) st = env.from_string( - "search index=netproxy sourcetype=\"cisco:wsa:squid:new\" _raw=\"{{ message }}\"") - message1 = mt.render(mark="", bsd="", host="") - search = st.render(host=host, message=message1.lstrip().replace('"','\\"')) + "search index=netproxy _time={{ epoch }} sourcetype=\"cisco:wsa:squid:new\" _raw=\"{{ message }}\"") + message1 = mt.render(mark="", bsd="", host="", wsatime=wsatime) + search = st.render(epoch=epoch, host=host, message=message1.lstrip().replace('"','\\"')) resultCount, eventCount = splunk_single(setup_splunk, search) record_property("host", host) @@ -67,19 +68,20 @@ def test_cisco_wsa_squid(record_property, setup_wordlist, get_host_key, setup_sp dt = datetime.datetime.now() iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) + wsatime = dt.strftime("%s.%f")[:-3] # Tune time functions - epoch = epoch[:-7] + epoch = epoch[:-3] mt = env.from_string(event + "\n") - message = mt.render(mark="<13>", bsd=bsd ,host=host ) + message = mt.render(mark="<13>", bsd=bsd ,host=host, wsatime=wsatime) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) st = env.from_string( - "search index=netproxy sourcetype=\"cisco:wsa:squid\" _raw=\"{{ message }}\"") - message1 = mt.render(mark="", bsd="", host="") - search = st.render(host=host, message=message1.lstrip().replace('"','\\"')) + "search index=netproxy 
_time={{ epoch }} sourcetype=\"cisco:wsa:squid\" _raw=\"{{ message }}\"") + message1 = mt.render(mark="", bsd="", host="", wsatime=wsatime) + search = st.render(epoch=epoch, host=host, message=message1.lstrip().replace('"','\\"')) resultCount, eventCount = splunk_single(setup_splunk, search) record_property("host", host) diff --git a/tests/test_citrix_netscaler.py b/tests/test_citrix_netscaler.py index 812c7df..f212771 100644 --- a/tests/test_citrix_netscaler.py +++ b/tests/test_citrix_netscaler.py @@ -16,9 +16,11 @@ env = Environment() -#<12> 01/10/2001:01:01:01 GMT netscaler ABC-D : SSLVPN HTTPREQUEST 1234567 : Context username@192.0.2.1 - SessionId: 12345- example.com User username : Group(s) groupname : Vserver a1b2:c3d4:e5f6:a7b8:c9d0:e1f2:a3b4:c5d6:123 - 01/01/2001:01:01:01 GMT GET file/path.gif - - +# <12> 01/10/2001:01:01:01 GMT netscaler ABC-D : SSLVPN HTTPREQUEST 1234567 : Context username@192.0.2.1 - SessionId: 12345- example.com User username : Group(s) groupname : Vserver a1b2:c3d4:e5f6:a7b8:c9d0:e1f2:a3b4:c5d6:123 - 01/01/2001:01:01:01 GMT GET file/path.gif - - def test_citrix_netscaler(record_property, setup_wordlist, setup_splunk, setup_sc4s): - host = "test-ctitrixns-{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) + host = "test-ctitrixns-{}-{}".format( + random.choice(setup_wordlist), random.choice(setup_wordlist) + ) pid = random.randint(1000, 32000) dt = datetime.datetime.now() @@ -28,12 +30,18 @@ def test_citrix_netscaler(record_property, setup_wordlist, setup_splunk, setup_s time = dt.strftime("%d/%m/%Y:%H:%M:%S") epoch = epoch[:-7] - mt = env.from_string("{{ mark }} {{ time }} {{ tzname }} {{ host }} ABC-D : SSLVPN HTTPREQUEST 1234567 : Context username@192.0.2.1 - SessionId: 12345- example.com User username : Group(s) groupname : Vserver a1b2:c3d4:e5f6:a7b8:c9d0:e1f2:a3b4:c5d6:123 - 01/01/2001:01:01:01 GMT GET file/path.gif - -\n") - message = mt.render(mark="<12>", bsd=bsd, time=time, tzname=tzname, host=host, 
pid=pid) + mt = env.from_string( + "{{ mark }} {{ time }} {{ tzname }} {{ host }} ABC-D : SSLVPN HTTPREQUEST 1234567 : Context username@192.0.2.1 - SessionId: 12345- example.com User username : Group(s) groupname : Vserver a1b2:c3d4:e5f6:a7b8:c9d0:e1f2:a3b4:c5d6:123 - 01/01/2001:01:01:01 GMT GET file/path.gif - -\n" + ) + message = mt.render( + mark="<12>", bsd=bsd, time=time, tzname=tzname, host=host, pid=pid + ) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - st = env.from_string("search _time={{ epoch }} index=netfw host={{ host }} sourcetype=\"citrix:netscaler:syslog\"") + st = env.from_string( + 'search _time={{ epoch }} index=netfw host={{ host }} sourcetype="citrix:netscaler:syslog"' + ) search = st.render(epoch=epoch, host=host, pid=pid) resultCount, eventCount = splunk_single(setup_splunk, search) @@ -45,9 +53,13 @@ def test_citrix_netscaler(record_property, setup_wordlist, setup_splunk, setup_s assert resultCount == 1 -#<134>Jun 18 18:18:42 svm_service: 1.1.1.1 18/06/2020:16:18:42 GMT : GUI CMD_EXECUTED : User nsroot - Remote_ip 10.55.1.100 - Command "login login tenant_name=Owner,password=***********,challenge_response=***********,token=1c81504d124245d,client_port=-1,cert_verified=false,sessionid=***********,session_timeout=900,permission=superuser" - Status "Done" -def test_citrix_netscaler_sdx(record_property, setup_wordlist, setup_splunk, setup_sc4s): - host = "test-ctitrixns-{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) +# <134>Jun 18 18:18:42 svm_service: 1.1.1.1 18/06/2020:16:18:42 GMT : GUI CMD_EXECUTED : User nsroot - Remote_ip 10.55.1.100 - Command "login login tenant_name=Owner,password=***********,challenge_response=***********,token=1c81504d124245d,client_port=-1,cert_verified=false,sessionid=***********,session_timeout=900,permission=superuser" - Status "Done" +def test_citrix_netscaler_sdx( + record_property, setup_wordlist, setup_splunk, setup_sc4s +): + host = "test-ctitrixns-{}-{}".format( + 
random.choice(setup_wordlist), random.choice(setup_wordlist) + ) pid = random.randint(1000, 32000) dt = datetime.datetime.now() @@ -57,12 +69,18 @@ def test_citrix_netscaler_sdx(record_property, setup_wordlist, setup_splunk, set time = dt.strftime("%d/%m/%Y:%H:%M:%S") epoch = epoch[:-7] - mt = env.from_string('{{ mark }}{{ bsd }} svm_service: {{ host }} {{ time }} GMT : GUI CMD_EXECUTED : User nsroot - Remote_ip 10.1.1.1 - Command "login login tenant_name=Owner,password=***********,challenge_response=***********,token=1c81504d124245d,client_port=-1,cert_verified=false,sessionid=***********,session_timeout=900,permission=superuser" - Status "Done"\n') - message = mt.render(mark="<12>", bsd=bsd, time=time, tzname=tzname, host=host, pid=pid) + mt = env.from_string( + '{{ mark }}{{ bsd }} svm_service: {{ host }} {{ time }} GMT : GUI CMD_EXECUTED : User nsroot - Remote_ip 10.1.1.1 - Command "login login tenant_name=Owner,password=***********,challenge_response=***********,token=1c81504d124245d,client_port=-1,cert_verified=false,sessionid=***********,session_timeout=900,permission=superuser" - Status "Done"\n' + ) + message = mt.render( + mark="<12>", bsd=bsd, time=time, tzname=tzname, host=host, pid=pid + ) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - st = env.from_string("search _time={{ epoch }} index=netfw host={{ host }} sourcetype=\"citrix:netscaler:syslog\"") + st = env.from_string( + 'search _time={{ epoch }} index=netfw host={{ host }} sourcetype="citrix:netscaler:syslog"' + ) search = st.render(epoch=epoch, host=host, pid=pid) resultCount, eventCount = splunk_single(setup_splunk, search) @@ -71,4 +89,44 @@ def test_citrix_netscaler_sdx(record_property, setup_wordlist, setup_splunk, set record_property("resultCount", resultCount) record_property("message", message) - assert resultCount == 1 \ No newline at end of file + assert resultCount == 1 + + +# [289]: AAA Message : In receive_ldap_user_search_event: ldap_first_entry returned null, user 
ssgconfig not found +def test_citrix_netscaler_sdx_AAA( + record_property, setup_wordlist, setup_splunk, setup_sc4s +): + host = "test-ctitrixns-{}-{}".format( + random.choice(setup_wordlist), random.choice(setup_wordlist) + ) + pid = random.randint(1000, 32000) + + dt = datetime.datetime.now() + iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) + + # Tune time functions + time = dt.strftime("%d/%m/%Y:%H:%M:%S") + epoch = epoch[:-7] + + mt = env.from_string( + "{{ mark }}{{ bsd }} [289]: AAA Message : In receive_ldap_user_search_event: ldap_first_entry returned null, user {{ host }} not found\n" + ) + message = mt.render( + mark="<12>", bsd=bsd, time=time, tzname=tzname, host=host, pid=pid + ) + + sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) + + st = env.from_string( + 'search _time={{ epoch }} index=netfw {{ host }} sourcetype="citrix:netscaler:syslog"' + ) + search = st.render(epoch=epoch, host=host, pid=pid) + + resultCount, eventCount = splunk_single(setup_splunk, search) + + record_property("host", host) + record_property("resultCount", resultCount) + record_property("message", message) + + assert resultCount == 1 + diff --git a/tests/test_common.py b/tests/test_common.py index b0b65b1..483f543 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -102,85 +102,6 @@ def test_fallback(record_property, setup_wordlist, setup_splunk, setup_sc4s): assert resultCount == 1 -# - -def test_fix_dns_context(record_property, setup_wordlist, setup_splunk, setup_sc4s): - host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) - pid = random.randint(1000, 32000) - - dt = datetime.datetime.now() - iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - - # Tune time functions - epoch = epoch[:-7] - - mt = env.from_string("{{ mark }} {{ bsd }} 169.254.0.2 dnstest[{{ pid }}]: {{ host }}\n") - message = mt.render(mark="<111>", bsd=bsd, host=host, pid=pid) - - sendsingle(message, setup_sc4s[0], 
setup_sc4s[1][514]) - - st = env.from_string("search _time={{ epoch }} host=foo.example index=osnix \"[{{ pid }}]\" {{ host }} sourcetype=\"nix:syslog\"") - search = st.render(epoch=epoch, pid=pid, host=host) - - resultCount, eventCount = splunk_single(setup_splunk, search) - - record_property("host", host) - record_property("resultCount", resultCount) - record_property("message", message) - - assert resultCount == 1 -def test_fix_dns(record_property, setup_wordlist, setup_splunk, setup_sc4s): - host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) - pid = random.randint(1000, 32000) - - dt = datetime.datetime.now() - iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - - # Tune time functions - epoch = epoch[:-7] - - mt = env.from_string("{{ mark }} {{ bsd }} 8.8.4.4 dnstest[{{ pid }}]: {{ host }}\n") - message = mt.render(mark="<111>", bsd=bsd, host=host, pid=pid) - - sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - - st = env.from_string("search _time={{ epoch }} host=dns index=osnix \"[{{ pid }}]\" {{ host }} sourcetype=\"nix:syslog\"") - search = st.render(epoch=epoch, pid=pid, host=host) - - resultCount, eventCount = splunk_single(setup_splunk, search) - - record_property("host", host) - record_property("resultCount", resultCount) - record_property("message", message) - - assert resultCount == 1 - -def test_fix_dns_notfound(record_property, setup_wordlist, setup_splunk, setup_sc4s): - host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist)) - pid = random.randint(1000, 32000) - - dt = datetime.datetime.now() - iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - - # Tune time functions - epoch = epoch[:-7] - - mt = env.from_string("{{ mark }} {{ bsd }} 169.254.0.1 dnstest[{{ pid }}]: {{ host }}\n") - message = mt.render(mark="<111>", bsd=bsd, host=host, pid=pid) - - sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) - - st = env.from_string("search _time={{ 
epoch }} host=169.254.0.1 index=osnix \"[{{ pid }}]\" {{ host }} sourcetype=\"nix:syslog\"") - search = st.render(epoch=epoch, pid=pid, host=host) - - resultCount, eventCount = splunk_single(setup_splunk, search) - - record_property("host", host) - record_property("resultCount", resultCount) - record_property("message", message) - - assert resultCount == 1 - def test_metrics(record_property, setup_wordlist, setup_splunk, setup_sc4s): st = env.from_string( diff --git a/tests/test_dell_rsa_secureid.py b/tests/test_dell_rsa_secureid.py index a67f34a..e19e16e 100644 --- a/tests/test_dell_rsa_secureid.py +++ b/tests/test_dell_rsa_secureid.py @@ -31,10 +31,10 @@ def test_dell_rsa_secureid_admin( dt = datetime.datetime.now() iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - rsatime = dt.strftime("%H:%M:%S,%f") + rsatime = dt.strftime("%H:%M:%S,%f")[:-3] # Tune time functions - epoch = epoch[:-7] + epoch = epoch[:-3] mt = env.from_string(event + "\n") message = mt.render(mark="<166>", bsd=bsd, host=host, date=date, rsatime=rsatime) @@ -65,10 +65,10 @@ def test_dell_rsa_secureid_system( dt = datetime.datetime.now() iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - rsatime = dt.strftime("%H:%M:%S,%f") + rsatime = dt.strftime("%H:%M:%S,%f")[:-3] # Tune time functions - epoch = epoch[:-7] + epoch = epoch[:-3] mt = env.from_string(event + "\n") message = mt.render(mark="<166>", bsd=bsd, host=host, date=date, rsatime=rsatime) @@ -99,10 +99,10 @@ def test_dell_rsa_secureid_runtime( dt = datetime.datetime.now() iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt) - rsatime = dt.strftime("%H:%M:%S,%f") + rsatime = dt.strftime("%H:%M:%S,%f")[:-3] # Tune time functions - epoch = epoch[:-7] + epoch = epoch[:-3] mt = env.from_string(event + "\n") message = mt.render(mark="<166>", bsd=bsd, host=host, date=date, rsatime=rsatime) @@ -147,13 +147,12 @@ def test_dell_rsa_secureid_trace( ] dt = datetime.datetime.now() iso, bsd, time, 
date, tzoffset, tzname, epoch = time_operations(dt) - rsatime = dt.strftime("%H:%M:%S,%f") # Tune time functions epoch = epoch[:-7] for event in events: mt = env.from_string(event + "\n") - message = mt.render(mark="<166>", bsd=bsd, host=host, date=date, rsatime=rsatime) + message = mt.render(mark="<166>", bsd=bsd, host=host, date=date) sendsingle(message, setup_sc4s[0], setup_sc4s[1][514]) st = env.from_string(