diff Kibana-external-config-patch @ 26:610835fb4209

external configuration for kibana
author Carl Byington <carl@five-ten-sg.com>
date Fri, 03 May 2013 08:04:08 -0700
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Kibana-external-config-patch	Fri May 03 08:04:08 2013 -0700
@@ -0,0 +1,93 @@
+--- KibanaConfig.rb	2013-05-02 15:22:03.011877891 -0700
++++ KibanaConfig.new.rb	2013-05-02 15:26:57.419447970 -0700
+@@ -3,26 +3,14 @@
+   # Your elastic search server(s). This may be set as an array for round robin
+   # load balancing
+   # Elasticsearch = ["elasticsearch1:9200","elasticsearch2:9200"]
+-  Elasticsearch = "localhost:9200"
++  Elasticsearch = ENV['KIBANA_ES'] ? ENV['KIBANA_ES'] : "localhost:9200"
+ 
+   #Set the Net::HTTP read/open timeouts for the connection to the ES backend
+   ElasticsearchTimeout = 500
+ 
+-  # The port Kibana should listen on
+-  KibanaPort = 5601
+-
+-  # The adress ip Kibana should listen on. Comment out or set to
+-  # 0.0.0.0 to listen on all interfaces.
+-  KibanaHost = '127.0.0.1'
+-  
+-  # Below is an example showing how to configure the same variables
+-  # using environment variables, which can be set in an init script
+-  # es_ip = ENV['ES_IP'] ?  ENV['ES_IP'] : '127.0.0.1'
+-  # es_port = ENV['ES_PORT'] ?  ENV['ES_PORT'] : 9200
+-  # Elasticsearch = "#{es_ip}:#{es_port}"
+-  # KibanaPort = ENV['KIBANA_PORT'] ? ENV['KIBANA_PORT'] : 5601
+-  # KibanaHost = ENV['KIBANA_HOST'] ? ENV['KIBANA_HOST'] : 'localhost'
+-  
++  # The port and IP address Kibana should listen on.
++  KibanaPort = ENV['KIBANA_PORT'] ? ENV['KIBANA_PORT'] : 5601
++  KibanaHost = ENV['KIBANA_HOST'] ? ENV['KIBANA_HOST'] : 'localhost'
+ 
+   # The record type as defined in your logstash configuration.
+   # Seperate multiple types with a comma, no spaces. Leave blank
+@@ -44,19 +32,19 @@
+   # Do not use isoUtcDatetime or the "UTC:" prefix described in the above
+   # article, as timezone correction is already performed by the "Timezone"
+   # config variable.
+-  # Time_format = 'isoDateTime' 
++  # Time_format = 'isoDateTime'
+   Time_format = 'mm/dd HH:MM:ss'
+ 
+   # Change which fields are shown by default. Must be set as an array
+   # Default_fields = ['@fields.vhost','@fields.response','@fields.request']
+   Default_fields = ['@message']
+ 
+-  # If set to true, Kibana will use the Highlight feature of Elasticsearch to 
++  # If set to true, Kibana will use the Highlight feature of Elasticsearch to
+   # display highlighted search results
+-  Highlight_results = true
++  Highlight_results = false
+ 
+-  # A field needs to be specified for the highlight feature. By default, 
+-  # Elasticsearch doesn't allow highlighting on _all because the field has to 
++  # A field needs to be specified for the highlight feature. By default,
++  # Elasticsearch doesn't allow highlighting on _all because the field has to
+   # be either stored or part of the _source field.
+   Highlighted_field = "@message"
+ 
+@@ -99,18 +87,18 @@
+   # indexing
+   Smart_index = true
+ 
+-  # You can define your custom pattern here for index names if you 
+-  # use something other than daily indexing. Pattern needs to have 
+-  # date formatting like '%Y.%m.%d'.  Will accept an array of smart 
+-  # indexes.  
+-  # Smart_index_pattern = ['logstash-web-%Y.%m.%d', 'logstash-mail-%Y.%m.%d'] 
++  # You can define your custom pattern here for index names if you
++  # use something other than daily indexing. Pattern needs to have
++  # date formatting like '%Y.%m.%d'.  Will accept an array of smart
++  # indexes.
++  # Smart_index_pattern = ['logstash-web-%Y.%m.%d', 'logstash-mail-%Y.%m.%d']
+   # Smart_index_pattern = 'logstash-%Y.%m.%d'
+   # here is an example of how to set the pattern using an environment variable
+   # Smart_index_pattern = ENV['SMART_INDEX'] ? ENV['SMART_INDEX'] : 'logstash-%Y.%m.%d'
+   Smart_index_pattern = 'logstash-%Y.%m.%d'
+-  
++
+   # Number of seconds between each index. 86400 = 1 day.
+-  Smart_index_step = 86400 
++  Smart_index_step = 86400
+ 
+   # ElasticSearch has a default limit on URL size for REST calls,
+   # so Kibana will fall back to _all if a search spans too many
+@@ -120,7 +108,7 @@
+ 
+   # Elasticsearch has an internal mechanism called "faceting" for performing
+   # analysis that we use for the "Stats" and "Terms" modes. However, on large
+-  # data sets/queries facetting can cause ES to crash if there isn't enough 
++  # data sets/queries faceting can cause ES to crash if there isn't enough
+   # memory available. It is suggested that you limit the number of indices that
+   # Kibana will use for the "Stats" and "Terms" to prevent ES crashes. For very
+   # large data sets and undersized ES clusers, a limit of 1 is not unreasonable.
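
The patch above replaces Kibana's hard-coded Elasticsearch, KibanaPort and KibanaHost constants with values read from the environment, falling back to the stock defaults when a variable is unset. A minimal sketch of the same idiom outside the patch follows; the KIBANA_ES, KIBANA_PORT and KIBANA_HOST names come from the patch, while the init-script lines and the .to_i conversion are illustrative assumptions, not part of the patch itself.

    # In an init script or service environment file one might export, e.g.:
    #   export KIBANA_ES="es1.example.com:9200"
    #   export KIBANA_PORT="5601"
    #   export KIBANA_HOST="0.0.0.0"

    module KibanaConfig
      # Fall back to the previous hard-coded defaults when a variable is unset.
      Elasticsearch = ENV['KIBANA_ES']   ? ENV['KIBANA_ES']        : "localhost:9200"
      # ENV values are always strings; .to_i restores the integer default type
      # (the patch itself keeps the value as a string).
      KibanaPort    = ENV['KIBANA_PORT'] ? ENV['KIBANA_PORT'].to_i : 5601
      KibanaHost    = ENV['KIBANA_HOST'] ? ENV['KIBANA_HOST']      : 'localhost'
    end

    puts KibanaConfig::Elasticsearch   # "es1.example.com:9200" when KIBANA_ES is set

Note that ENV['KIBANA_PORT'] yields a string, so whether the .to_i shown here is needed depends on how the port is consumed downstream; the patch keeps the plain string, matching the commented upstream example it replaces.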