| {{/* |
| # Copyright © 2018 AT&T, Amdocs, Bell Canada Intellectual Property. All rights reserved. |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| */}} |
| input { |
| beats { |
| |
| ## Add an id to the plugin configuration. Can be any unique string.
| id => 'beats_plugin' |
| |
| ######## Connection configurations ######## |
| |
| ## The port to listen on. |
| port => {{.Values.service.externalPort}} |
| |
| ## Close idle clients after the specified time in seconds. Default is 60 seconds.
| #client_inactivity_timeout => 60 |
| |
| ######## Security configurations ######## |
| |
| ## Enable encryption. Default false. |
| #ssl => $filebeat_ssl |
| |
| ## SSL certificate path.
| #ssl_certificate => $filebeat_ssl_certificate |
| |
| ## SSL key to use. |
| #ssl_key => $filebeat_ssl_key |
| |
| ## SSL key passphrase to use.
| #ssl_key_passphrase => $filebeat_ssl_key_passphrase |
| |
| ## Certificate verification mode. Value can be any of: none, peer, force_peer.
| #ssl_verify_mode => $filebeat_ssl_verify_mode |
| |
| ## Time in milliseconds for an incomplete SSL handshake to time out. Default is 10000 ms.
| #ssl_handshake_timeout => 10000 |
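| 
| ## A minimal sketch of an enabled TLS setup. The certificate paths are
| ## assumptions for illustration; adjust them to wherever the chart mounts
| ## the Logstash certificates.
| #ssl => true
| #ssl_certificate => "/usr/share/logstash/config/certs/logstash.crt"
| #ssl_key => "/usr/share/logstash/config/certs/logstash.key"
| #ssl_verify_mode => "peer"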
| 
| ## Do not append the codec identity tag (e.g. "beats_input_codec_plain_applied") to events.
| include_codec_tag => false
| } |
| } |
| |
| |
| filter { |
| grok { |
| break_on_match => false |
| match => { |
| "source" => ["/var/log/onap/(?<componentName>[^/]+)/", |
| "/var/log/onap/%{GREEDYDATA:componentLogFile}" |
| ] |
| } |
| } |
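| 
| # For example, a shipped path such as "/var/log/onap/aai/aai-resources/debug.log"
| # (an assumed path, for illustration) yields componentName => "aai" from the first
| # pattern and componentLogFile => "aai/aai-resources/debug.log" from the second;
| # break_on_match => false lets both patterns contribute.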
| |
| # Filter for log4j XML events
| if "</log4j:event>" in [message] { |
| |
| #mutate { add_field => { "orgmsg_log4j" => "%{message}" } } # Copy of orginal msg for debug |
| |
| # Parse the XML event and extract fields via XPath
| xml { |
| source => "message" |
| store_xml => false |
| remove_namespaces => true |
| target => "xml_content" |
| xpath => [ "/event/message/text()", "logmsg" , |
| "/event/@logger", "Logger", |
| "/event/@timestamp", "Timestamp", |
| "/event/@level", "loglevel", |
| "/event/@thread", "Thread", |
| "/event/throwable/text()", "Exceptionthrowable", |
| "/event/NDC/text()", "NDCs", |
| "/event/properties/data/@name","mdcname", |
| "/event/properties/data/@value","mdcvalue"] |
| |
| } |
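| 
| # For illustration, an event like the following (all values assumed):
| #   <log4j:event logger="org.onap.Example" timestamp="1518004800000" level="INFO" thread="main">
| #     <log4j:message>Service started</log4j:message>
| #   </log4j:event>
| # yields logmsg => ["Service started"], Logger => ["org.onap.Example"],
| # Timestamp => ["1518004800000"], loglevel => ["INFO"] and Thread => ["main"].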
| |
| # Ruby filter to iterate over the MDC name/value arrays and promote each pair to a top-level event field
| ruby {
| code => '
| # Use local variables: globals would persist across events and pipeline
| # workers, which can corrupt state under load.
| names = event.get("[mdcname]")
| values = event.get("[mdcvalue]")
| if names and values
| names.each_with_index do |name, i|
| event.set(name, values[i]) if name and values[i]
| end
| end
| '
| }
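| 
| # For example (values assumed), mdcname => ["RequestId", "ServiceName"] and
| # mdcvalue => ["abc-123", "inventory"] become the top-level event fields
| # RequestId => "abc-123" and ServiceName => "inventory".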
| |
| # Promote optional fields extracted from the XML when they are present
| if [Exceptionthrowable] |
| { |
| mutate { |
| replace => { |
| "exceptionmessage" => "%{[Exceptionthrowable]}" |
| } |
| } |
| } |
| |
| if [NDCs] |
| { |
| mutate { |
| replace => { |
| "NDC" => "%{[NDCs]}" |
| } |
| } |
| } |
| |
| # XPath results are arrays; the sprintf-style replace below flattens them to plain strings.
| mutate {
| replace => { |
| "Logger" =>"%{[Logger]}" |
| "logmsg" =>"%{[logmsg]}" |
| "Timestamp" =>"%{[Timestamp]}" |
| "loglevel" =>"%{[loglevel]}" |
| "message" => "%{logmsg}" |
| "Thread" => "%{[Thread]}" |
| } |
| remove_field => ["mdcname", "mdcvalue", "logmsg","Exceptionthrowable","NDCs"] |
| } |
| |
| if [Timestamp] |
| { |
| date { |
| match => ["Timestamp", "UNIX_MS"] |
| target => "Timestamp" |
| } |
| } |
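| 
| # For example, the assumed value "1518004800000" above parses to 2018-02-07T12:00:00.000Z.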
| } |
| # Filter for logback events |
| else { |
| |
| #mutate { add_field => { "orgmsg" => "%{message}" } } # Copy of orginal msg for debug |
| |
| mutate { |
| gsub => [ |
| 'message', ' = ', '=', |
| 'message', '= ', '=null', |
| 'message', '=\t', '=null\t', # this "null" is followed by a tab
| 'message', '\t$', '\t' |
| ] |
| } |
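| 
| # For example, an assumed fragment "RequestId = abc-123\tPartnerName= \t" is
| # normalized to "RequestId=abc-123\tPartnerName=null\t", so the kv filter
| # further below sees well-formed pairs.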
| # The grok below parses the message field for all current logback patterns used by OOM components.
| # Example logback pattern: %d{"yyyy-MM-dd'T'HH:mm:ss.SSSXXX", UTC}|%X{RequestId}|%msg |
| # Example grok pattern: %{TIMESTAMP_ISO8601:Timestamp}\|%{UUID:RequestId}\|%{GREEDYDATA:message} |
| # Use the following command to find all logback patterns in oom directory: find oom -name "logback*xml" -exec grep "property.*attern.*value" {} \;|sort|uniq |
| grok { |
| match => { |
| "message" => [ |
| "%{TIMESTAMP_ISO8601:Timestamp}\\t[%{GREEDYDATA:Thread}]\\t%{GREEDYDATA:loglevel}\\t%{JAVACLASS:Logger}\\t%{GREEDYDATA:MDCs}\\t%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:BeginTimestamp}\|%{TIMESTAMP_ISO8601:EndTimestamp}\|%{UUID:RequestId}\|%{GREEDYDATA:ServiceInstanceId}\|%{GREEDYDATA:Thread}\|%{GREEDYDATA:Unknown1}\|%{GREEDYDATA:ServiceName}\|%{GREEDYDATA:PartnerName}\|%{GREEDYDATA:TargetEntity}\|%{GREEDYDATA:TargetServiceName}\|%{GREEDYDATA:StatusCode}\|%{GREEDYDATA:ResponseCode}\|%{GREEDYDATA:ResponseDesc}\|%{UUID:InstanceUUID}\|%{GREEDYDATA:loglevel}\|%{GREEDYDATA:AlertSeverity}\|%{IP:ServerIPAddress}\|%{GREEDYDATA:Timer}\|%{HOSTNAME:ServerFQDN}\|%{IPORHOST:RemoteHost}\|%{GREEDYDATA:Unknown2}\|%{GREEDYDATA:Unknown3}\|%{GREEDYDATA:Unknown4}\|%{GREEDYDATA:TargetVirtualEntity}\|%{GREEDYDATA:Unknown5}\|%{GREEDYDATA:Unknown6}\|%{GREEDYDATA:Unknown7}\|%{GREEDYDATA:Unknown8}\|%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:BeginTimestamp}\|%{TIMESTAMP_ISO8601:EndTimestamp}\|%{UUID:RequestId}\|%{GREEDYDATA:ServiceInstanceId}\|%{GREEDYDATA:Thread}\|%{GREEDYDATA:Unknown1}\|%{GREEDYDATA:ServiceName}\|%{GREEDYDATA:PartnerName}\|%{GREEDYDATA:StatusCode}\|%{GREEDYDATA:ResponseCode}\|%{GREEDYDATA:ResponseDesc}\|%{UUID:InstanceUUID}\|%{GREEDYDATA:loglevel}\|%{GREEDYDATA:AlertSeverity}\|%{IP:ServerIPAddress}\|%{GREEDYDATA:Timer}\|%{HOSTNAME:ServerFQDN}\|%{IPORHOST:RemoteHost}\|%{GREEDYDATA:Unknown2}\|%{GREEDYDATA:Unknown3}\|%{GREEDYDATA:Unknown4}\|%{GREEDYDATA:Unknown5}\|%{GREEDYDATA:Unknown6}\|%{GREEDYDATA:Unknown7}\|%{GREEDYDATA:Unknown8}\|%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:Timestamp}\|%{UUID:RequestId}\|%{GREEDYDATA:ServiceInstanceId}\|%{GREEDYDATA:Thread}\|%{GREEDYDATA:ServiceName}\|%{UUID:InstanceUUID}\|%{GREEDYDATA:loglevel}\|%{GREEDYDATA:AlertSeverity}\|%{IP:ServerIPAddress}\|%{HOSTNAME:ServerFQDN}\|%{IPORHOST:RemoteHost}\|%{GREEDYDATA:Timer}\|\[%{GREEDYDATA:caller}\]\|%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:Timestamp}\|%{GREEDYDATA:RequestId}\|%{GREEDYDATA:Thread}\|%{GREEDYDATA:ServiceName}\|%{GREEDYDATA:PartnerName}\|%{GREEDYDATA:TargetEntity}\|%{GREEDYDATA:TargetServiceName}\|%{GREEDYDATA:loglevel}\|%{GREEDYDATA:ErrorCode}\|%{GREEDYDATA:ErrorDesc}\|%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:Timestamp}\|%{GREEDYDATA:RequestId}\|%{GREEDYDATA:Thread}\|%{GREEDYDATA:ClassName}\|%{GREEDYDATA:message}", |
| "%{TIMESTAMP_ISO8601:Timestamp}\|%{UUID:RequestId}\|%{GREEDYDATA:message}", |
| "\[%{TIMESTAMP_ISO8601:Timestamp}\|%{LOGLEVEL:loglevel}\|%{GREEDYDATA:Logger}\|%{GREEDYDATA:Thread}\] %{GREEDYDATA:message}" |
| ] |
| } |
| overwrite => ["message"] |
| } |
| # The MDCs are key/value pairs separated by "," or "\t". Extra space characters are trimmed from the keys and values.
| kv { |
| source => "MDCs" |
| field_split => ",\t" |
| trim_key => "\s" |
| trim_value => "\s" |
| remove_field => [ "MDCs" ] |
| } |
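| 
| # Continuing the assumed example above: once grok has placed
| # "RequestId=abc-123\tPartnerName=null" into MDCs, the kv filter yields
| # RequestId => "abc-123" and PartnerName => "null".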
| |
| if (![Timestamp] and [EndTimestamp]) { |
| mutate { add_field => { "Timestamp" => "%{EndTimestamp}" } } |
| } |
| date { |
| match => [ "Timestamp", "ISO8601", "yyyy-MM-dd HH:mm:ss,SSS" ] |
| target => "Timestamp" |
| } |
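| 
| # For example, an assumed "2018-02-07 12:00:00,123" (the second pattern) or an
| # ISO8601 value such as "2018-02-07T12:00:00.123Z" both normalize Timestamp to a date.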
| |
| mutate { |
| remove_field => ["DuplicateRequestID", "Unknown1", "Unknown2", "Unknown3", "Unknown4", "Unknown5", "Unknown6", "Unknown7", "Unknown8"] |
| } |
| |
| if ([source] == "/var/log/onap/sdc/sdc-be/audit.log") { |
| # Parse key/value pairs in the message
| kv { |
| field_split => "\s" |
| trim_key => "\s" |
| trim_value => "\s" |
| } |
| |
| # If RequestId is missing and DID is present, use DID as the RequestId
| if (![RequestId] and [DID] =~ /.+/) { |
| mutate { add_field => { "RequestId" => "%{DID}" } } |
| } |
| } |
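| 
| # For example (assumed audit line content), a message containing
| # "DID=abc-123 STATUS=COMPLETE" produces DID and STATUS fields, and
| # RequestId is then set to "abc-123" when it was not already present.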
| |
| } #Close else statement for logback events |
| } #Close filter |
| |
| |
| output { |
| elasticsearch { |
| id => 'onap_es' |
| |
| ######### Security configurations ######### |
| |
| user => "elastic" |
| password => "changeme" |
| |
| ## The .cer or .pem file to validate the server's certificate |
| #cacert => $es_cacert |
| |
| ## The keystore used to present a certificate to the server. It can be either .jks or .p12 |
| #keystore => $es_keystore |
| #keystore_password => $es_keystore_password |
| |
| ## Enable SSL/TLS-secured communication to the Elasticsearch cluster.
| ## Not set by default; in that case the behavior follows the protocol specified in the hosts list.
| #ssl => $es_ssl |
| |
| ## Option to validate the server's certificate. Default is true |
| #ssl_certificate_verification => $es_ssl_certificate_verification |
| |
| ## The JKS truststore to validate the server's certificate. |
| #truststore => $es_truststore |
| #truststore_password => $es_truststore_password |
| |
| |
| ######### Elasticsearch cluster and host configurations #########
| |
| ## You can specify one host or a list of hosts. If sniffing is enabled, one is enough; the others are auto-discovered.
| hosts => ["http://{{.Values.config.elasticsearchServiceName}}.{{.Release.Namespace}}:{{.Values.config.elasticsearchPort}}"] |
| |
| |
| ## This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. Default is false. |
| sniffing => true |
| |
| ## How long to wait, in seconds, between sniffing attempts. Default is 5 seconds. |
| #sniffing_delay => 5 |
| |
| ## Set the address of a forward HTTP proxy. |
| #proxy => $es_proxy |
| |
| ## Use this if you run Elasticsearch behind a proxy that remaps the root path where the Elasticsearch HTTP API lives.
| #path => $es_path |
| |
| ######### Elasticsearch request configurations ######### |
| |
| ## This setting defines the maximum size of the bulk requests Logstash will make.
| #flush_size => ? |
| |
| ######### Document configurations ######### |
| |
| index => "logstash-%{+YYYY.MM.dd}" |
| document_type => "logs" |
| |
| ## This can be used to associate child documents with a parent using the parent ID. |
| #parent => "abcd' |
| } |
| } |
| |